repo_name
string
path
string
copies
string
size
string
content
string
license
string
nishantjr/pjproject
pjsip-apps/src/samples/vid_streamutil.c
22
28011
/* $Id$ */ /* * Copyright (C) 2011 Teluu Inc. (http://www.teluu.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * \page page_pjmedia_samples_vid_streamutil_c Samples: Video Streaming * * This example mainly demonstrates how to stream video to remote * peer using RTP. * * This file is pjsip-apps/src/samples/vid_streamutil.c * * \includelineno vid_streamutil.c */ #include <pjlib.h> #include <pjlib-util.h> #include <pjmedia.h> #include <pjmedia-codec.h> #include <pjmedia/transport_srtp.h> #if defined(PJMEDIA_HAS_VIDEO) && (PJMEDIA_HAS_VIDEO != 0) #include <stdlib.h> /* atoi() */ #include <stdio.h> #include "util.h" static const char *desc = " vid_streamutil \n" "\n" " PURPOSE: \n" " Demonstrate how to use pjmedia video stream component to \n" " transmit/receive RTP packets to/from video device/file. \n" "\n" "\n" " USAGE: \n" " vid_streamutil [options] \n" "\n" "\n" " Options: \n" " --codec=CODEC Set the codec name. \n" " --local-port=PORT Set local RTP port (default=4000) \n" " --remote=IP:PORT Set the remote peer. If this option is set, \n" " the program will transmit RTP audio to the \n" " specified address. (default: recv only) \n" " --play-file=AVI Send video from the AVI file instead of from \n" " the video device. \n" " --send-recv Set stream direction to bidirectional. 
\n" " --send-only Set stream direction to send only \n" " --recv-only Set stream direction to recv only (default) \n" " --send-width Video width to be sent \n" " --send-height Video height to be sent \n" " --send-width and --send-height not applicable \n" " for file streaming (see --play-file) \n" " --send-pt Payload type for sending \n" " --recv-pt Payload type for receiving \n" #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) " --use-srtp[=NAME] Enable SRTP with crypto suite NAME \n" " e.g: AES_CM_128_HMAC_SHA1_80 (default), \n" " AES_CM_128_HMAC_SHA1_32 \n" " Use this option along with the TX & RX keys, \n" " formated of 60 hex digits (e.g: E148DA..) \n" " --srtp-tx-key SRTP key for transmiting \n" " --srtp-rx-key SRTP key for receiving \n" #endif "\n" ; #define THIS_FILE "vid_streamutil.c" /* If set, local renderer will be created to play original file */ #define HAS_LOCAL_RENDERER_FOR_PLAY_FILE 1 /* Default width and height for the renderer, better be set to maximum * acceptable size. */ #define DEF_RENDERER_WIDTH 640 #define DEF_RENDERER_HEIGHT 480 /* Prototype */ static void print_stream_stat(pjmedia_vid_stream *stream, const pjmedia_vid_codec_param *codec_param); /* Prototype for LIBSRTP utility in file datatypes.c */ int hex_string_to_octet_string(char *raw, char *hex, int len); /* * Register all codecs. */ static pj_status_t init_codecs(pj_pool_factory *pf) { pj_status_t status; /* To suppress warning about unused var when all codecs are disabled */ PJ_UNUSED_ARG(status); #if defined(PJMEDIA_HAS_OPENH264_CODEC) && PJMEDIA_HAS_OPENH264_CODEC != 0 status = pjmedia_codec_openh264_vid_init(NULL, pf); PJ_ASSERT_RETURN(status == PJ_SUCCESS, status); #endif #if defined(PJMEDIA_HAS_FFMPEG_VID_CODEC) && PJMEDIA_HAS_FFMPEG_VID_CODEC != 0 status = pjmedia_codec_ffmpeg_vid_init(NULL, pf); PJ_ASSERT_RETURN(status == PJ_SUCCESS, status); #endif return PJ_SUCCESS; } /* * Register all codecs. 
*/ static void deinit_codecs() { #if defined(PJMEDIA_HAS_FFMPEG_VID_CODEC) && PJMEDIA_HAS_FFMPEG_VID_CODEC != 0 pjmedia_codec_ffmpeg_vid_deinit(); #endif #if defined(PJMEDIA_HAS_OPENH264_CODEC) && PJMEDIA_HAS_OPENH264_CODEC != 0 pjmedia_codec_openh264_vid_deinit(); #endif } static pj_status_t create_file_player( pj_pool_t *pool, const char *file_name, pjmedia_port **p_play_port) { pjmedia_avi_streams *avi_streams; pjmedia_avi_stream *vid_stream; pjmedia_port *play_port; pj_status_t status; status = pjmedia_avi_player_create_streams(pool, file_name, 0, &avi_streams); if (status != PJ_SUCCESS) return status; vid_stream = pjmedia_avi_streams_get_stream_by_media(avi_streams, 0, PJMEDIA_TYPE_VIDEO); if (!vid_stream) return PJ_ENOTFOUND; play_port = pjmedia_avi_stream_get_port(vid_stream); pj_assert(play_port); *p_play_port = play_port; return PJ_SUCCESS; } /* * Create stream based on the codec, dir, remote address, etc. */ static pj_status_t create_stream( pj_pool_t *pool, pjmedia_endpt *med_endpt, const pjmedia_vid_codec_info *codec_info, pjmedia_vid_codec_param *codec_param, pjmedia_dir dir, pj_int8_t rx_pt, pj_int8_t tx_pt, pj_uint16_t local_port, const pj_sockaddr_in *rem_addr, #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) pj_bool_t use_srtp, const pj_str_t *crypto_suite, const pj_str_t *srtp_tx_key, const pj_str_t *srtp_rx_key, #endif pjmedia_vid_stream **p_stream ) { pjmedia_vid_stream_info info; pjmedia_transport *transport = NULL; pj_status_t status; #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) pjmedia_transport *srtp_tp = NULL; #endif /* Reset stream info. */ pj_bzero(&info, sizeof(info)); /* Initialize stream info formats */ info.type = PJMEDIA_TYPE_VIDEO; info.dir = dir; info.codec_info = *codec_info; info.tx_pt = (tx_pt == -1)? codec_info->pt : tx_pt; info.rx_pt = (rx_pt == -1)? 
codec_info->pt : rx_pt; info.ssrc = pj_rand(); if (codec_param) info.codec_param = codec_param; /* Copy remote address */ pj_memcpy(&info.rem_addr, rem_addr, sizeof(pj_sockaddr_in)); /* If remote address is not set, set to an arbitrary address * (otherwise stream will assert). */ if (info.rem_addr.addr.sa_family == 0) { const pj_str_t addr = pj_str("127.0.0.1"); pj_sockaddr_in_init(&info.rem_addr.ipv4, &addr, 0); } /* Create media transport */ status = pjmedia_transport_udp_create(med_endpt, NULL, local_port, 0, &transport); if (status != PJ_SUCCESS) return status; #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) /* Check if SRTP enabled */ if (use_srtp) { pjmedia_srtp_crypto tx_plc, rx_plc; status = pjmedia_transport_srtp_create(med_endpt, transport, NULL, &srtp_tp); if (status != PJ_SUCCESS) return status; pj_bzero(&tx_plc, sizeof(pjmedia_srtp_crypto)); pj_bzero(&rx_plc, sizeof(pjmedia_srtp_crypto)); tx_plc.key = *srtp_tx_key; tx_plc.name = *crypto_suite; rx_plc.key = *srtp_rx_key; rx_plc.name = *crypto_suite; status = pjmedia_transport_srtp_start(srtp_tp, &tx_plc, &rx_plc); if (status != PJ_SUCCESS) return status; transport = srtp_tp; } #endif /* Now that the stream info is initialized, we can create the * stream. 
*/ status = pjmedia_vid_stream_create( med_endpt, pool, &info, transport, NULL, p_stream); if (status != PJ_SUCCESS) { app_perror(THIS_FILE, "Error creating stream", status); pjmedia_transport_close(transport); return status; } return PJ_SUCCESS; } typedef struct play_file_data { const char *file_name; pjmedia_port *play_port; pjmedia_port *stream_port; pjmedia_vid_codec *decoder; pjmedia_port *renderer; void *read_buf; pj_size_t read_buf_size; void *dec_buf; pj_size_t dec_buf_size; } play_file_data; static void clock_cb(const pj_timestamp *ts, void *user_data) { play_file_data *play_file = (play_file_data*)user_data; pjmedia_frame read_frame, write_frame; pj_status_t status; PJ_UNUSED_ARG(ts); /* Read frame from file */ read_frame.buf = play_file->read_buf; read_frame.size = play_file->read_buf_size; pjmedia_port_get_frame(play_file->play_port, &read_frame); /* Decode frame, if needed */ if (play_file->decoder) { pjmedia_vid_codec *decoder = play_file->decoder; write_frame.buf = play_file->dec_buf; write_frame.size = play_file->dec_buf_size; status = pjmedia_vid_codec_decode(decoder, 1, &read_frame, (unsigned)write_frame.size, &write_frame); if (status != PJ_SUCCESS) return; } else { write_frame = read_frame; } /* Display frame locally */ if (play_file->renderer) pjmedia_port_put_frame(play_file->renderer, &write_frame); /* Send frame */ pjmedia_port_put_frame(play_file->stream_port, &write_frame); } /* * usage() */ static void usage() { puts(desc); } /* * main() */ int main(int argc, char *argv[]) { pj_caching_pool cp; pjmedia_endpt *med_endpt; pj_pool_t *pool; pjmedia_vid_stream *stream = NULL; pjmedia_port *enc_port, *dec_port; pj_status_t status; pjmedia_vid_port *capture=NULL, *renderer=NULL; pjmedia_vid_port_param vpp; #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) /* SRTP variables */ pj_bool_t use_srtp = PJ_FALSE; char tmp_tx_key[64]; char tmp_rx_key[64]; pj_str_t srtp_tx_key = {NULL, 0}; pj_str_t srtp_rx_key = {NULL, 0}; pj_str_t 
srtp_crypto_suite = {NULL, 0}; int tmp_key_len; #endif /* Default values */ const pjmedia_vid_codec_info *codec_info; pjmedia_vid_codec_param codec_param; pjmedia_dir dir = PJMEDIA_DIR_DECODING; pj_sockaddr_in remote_addr; pj_uint16_t local_port = 4000; char *codec_id = NULL; pjmedia_rect_size tx_size = {0}; pj_int8_t rx_pt = -1, tx_pt = -1; play_file_data play_file = { NULL }; pjmedia_port *play_port = NULL; pjmedia_vid_codec *play_decoder = NULL; pjmedia_clock *play_clock = NULL; enum { OPT_CODEC = 'c', OPT_LOCAL_PORT = 'p', OPT_REMOTE = 'r', OPT_PLAY_FILE = 'f', OPT_SEND_RECV = 'b', OPT_SEND_ONLY = 's', OPT_RECV_ONLY = 'i', OPT_SEND_WIDTH = 'W', OPT_SEND_HEIGHT = 'H', OPT_RECV_PT = 't', OPT_SEND_PT = 'T', #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) OPT_USE_SRTP = 'S', #endif OPT_SRTP_TX_KEY = 'x', OPT_SRTP_RX_KEY = 'y', OPT_HELP = 'h', }; struct pj_getopt_option long_options[] = { { "codec", 1, 0, OPT_CODEC }, { "local-port", 1, 0, OPT_LOCAL_PORT }, { "remote", 1, 0, OPT_REMOTE }, { "play-file", 1, 0, OPT_PLAY_FILE }, { "send-recv", 0, 0, OPT_SEND_RECV }, { "send-only", 0, 0, OPT_SEND_ONLY }, { "recv-only", 0, 0, OPT_RECV_ONLY }, { "send-width", 1, 0, OPT_SEND_WIDTH }, { "send-height", 1, 0, OPT_SEND_HEIGHT }, { "recv-pt", 1, 0, OPT_RECV_PT }, { "send-pt", 1, 0, OPT_SEND_PT }, #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) { "use-srtp", 2, 0, OPT_USE_SRTP }, { "srtp-tx-key", 1, 0, OPT_SRTP_TX_KEY }, { "srtp-rx-key", 1, 0, OPT_SRTP_RX_KEY }, #endif { "help", 0, 0, OPT_HELP }, { NULL, 0, 0, 0 }, }; int c; int option_index; pj_bzero(&remote_addr, sizeof(remote_addr)); /* init PJLIB : */ status = pj_init(); PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1); /* Parse arguments */ pj_optind = 0; while((c=pj_getopt_long(argc,argv, "h", long_options, &option_index))!=-1) { switch (c) { case OPT_CODEC: codec_id = pj_optarg; break; case OPT_LOCAL_PORT: local_port = (pj_uint16_t) atoi(pj_optarg); if (local_port < 1) { printf("Error: invalid local port %s\n", 
pj_optarg); return 1; } break; case OPT_REMOTE: { pj_str_t ip = pj_str(strtok(pj_optarg, ":")); pj_uint16_t port = (pj_uint16_t) atoi(strtok(NULL, ":")); status = pj_sockaddr_in_init(&remote_addr, &ip, port); if (status != PJ_SUCCESS) { app_perror(THIS_FILE, "Invalid remote address", status); return 1; } } break; case OPT_PLAY_FILE: play_file.file_name = pj_optarg; break; case OPT_SEND_RECV: dir = PJMEDIA_DIR_ENCODING_DECODING; break; case OPT_SEND_ONLY: dir = PJMEDIA_DIR_ENCODING; break; case OPT_RECV_ONLY: dir = PJMEDIA_DIR_DECODING; break; case OPT_SEND_WIDTH: tx_size.w = (unsigned)atoi(pj_optarg); break; case OPT_SEND_HEIGHT: tx_size.h = (unsigned)atoi(pj_optarg); break; case OPT_RECV_PT: rx_pt = (pj_int8_t)atoi(pj_optarg); break; case OPT_SEND_PT: tx_pt = (pj_int8_t)atoi(pj_optarg); break; #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) case OPT_USE_SRTP: use_srtp = PJ_TRUE; if (pj_optarg) { pj_strset(&srtp_crypto_suite, pj_optarg, strlen(pj_optarg)); } else { srtp_crypto_suite = pj_str("AES_CM_128_HMAC_SHA1_80"); } break; case OPT_SRTP_TX_KEY: tmp_key_len = hex_string_to_octet_string(tmp_tx_key, pj_optarg, (int)strlen(pj_optarg)); pj_strset(&srtp_tx_key, tmp_tx_key, tmp_key_len/2); break; case OPT_SRTP_RX_KEY: tmp_key_len = hex_string_to_octet_string(tmp_rx_key, pj_optarg, (int)strlen(pj_optarg)); pj_strset(&srtp_rx_key, tmp_rx_key, tmp_key_len/2); break; #endif case OPT_HELP: usage(); return 1; default: printf("Invalid options %s\n", argv[pj_optind]); return 1; } } /* Verify arguments. 
*/ if (dir & PJMEDIA_DIR_ENCODING) { if (remote_addr.sin_addr.s_addr == 0) { printf("Error: remote address must be set\n"); return 1; } } if (play_file.file_name != NULL && dir != PJMEDIA_DIR_ENCODING) { printf("Direction is set to --send-only because of --play-file\n"); dir = PJMEDIA_DIR_ENCODING; } #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) /* SRTP validation */ if (use_srtp) { if (!srtp_tx_key.slen || !srtp_rx_key.slen) { printf("Error: Key for each SRTP stream direction must be set\n"); return 1; } } #endif /* Must create a pool factory before we can allocate any memory. */ pj_caching_pool_init(&cp, &pj_pool_factory_default_policy, 0); /* * Initialize media endpoint. * This will implicitly initialize PJMEDIA too. */ status = pjmedia_endpt_create(&cp.factory, NULL, 1, &med_endpt); PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1); /* Create memory pool for application purpose */ pool = pj_pool_create( &cp.factory, /* pool factory */ "app", /* pool name. */ 4000, /* init size */ 4000, /* increment size */ NULL /* callback on error */ ); /* Init video format manager */ pjmedia_video_format_mgr_create(pool, 64, 0, NULL); /* Init video converter manager */ pjmedia_converter_mgr_create(pool, NULL); /* Init event manager */ pjmedia_event_mgr_create(pool, 0, NULL); /* Init video codec manager */ pjmedia_vid_codec_mgr_create(pool, NULL); /* Init video subsystem */ pjmedia_vid_dev_subsys_init(&cp.factory); /* Register all supported codecs */ status = init_codecs(&cp.factory); PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1); /* Find which codec to use. 
*/ if (codec_id) { unsigned count = 1; pj_str_t str_codec_id = pj_str(codec_id); status = pjmedia_vid_codec_mgr_find_codecs_by_id(NULL, &str_codec_id, &count, &codec_info, NULL); if (status != PJ_SUCCESS) { printf("Error: unable to find codec %s\n", codec_id); return 1; } } else { static pjmedia_vid_codec_info info[1]; unsigned count = PJ_ARRAY_SIZE(info); /* Default to first codec */ pjmedia_vid_codec_mgr_enum_codecs(NULL, &count, info, NULL); codec_info = &info[0]; } /* Get codec default param for info */ status = pjmedia_vid_codec_mgr_get_default_param(NULL, codec_info, &codec_param); pj_assert(status == PJ_SUCCESS); /* Set outgoing video size */ if (tx_size.w && tx_size.h) codec_param.enc_fmt.det.vid.size = tx_size; #if DEF_RENDERER_WIDTH && DEF_RENDERER_HEIGHT /* Set incoming video size */ if (DEF_RENDERER_WIDTH > codec_param.dec_fmt.det.vid.size.w) codec_param.dec_fmt.det.vid.size.w = DEF_RENDERER_WIDTH; if (DEF_RENDERER_HEIGHT > codec_param.dec_fmt.det.vid.size.h) codec_param.dec_fmt.det.vid.size.h = DEF_RENDERER_HEIGHT; #endif if (play_file.file_name) { pjmedia_video_format_detail *file_vfd; pjmedia_clock_param clock_param; char fmt_name[5]; /* Create file player */ status = create_file_player(pool, play_file.file_name, &play_port); if (status != PJ_SUCCESS) goto on_exit; /* Collect format info */ file_vfd = pjmedia_format_get_video_format_detail(&play_port->info.fmt, PJ_TRUE); PJ_LOG(2, (THIS_FILE, "Reading video stream %dx%d %s @%.2ffps", file_vfd->size.w, file_vfd->size.h, pjmedia_fourcc_name(play_port->info.fmt.id, fmt_name), (1.0*file_vfd->fps.num/file_vfd->fps.denum))); /* Allocate file read buffer */ play_file.read_buf_size = PJMEDIA_MAX_VIDEO_ENC_FRAME_SIZE; play_file.read_buf = pj_pool_zalloc(pool, play_file.read_buf_size); /* Create decoder, if the file and the stream uses different codec */ if (codec_info->fmt_id != (pjmedia_format_id)play_port->info.fmt.id) { const pjmedia_video_format_info *dec_vfi; pjmedia_video_apply_fmt_param dec_vafp = {0}; 
const pjmedia_vid_codec_info *codec_info2; pjmedia_vid_codec_param codec_param2; /* Find decoder */ status = pjmedia_vid_codec_mgr_get_codec_info2(NULL, play_port->info.fmt.id, &codec_info2); if (status != PJ_SUCCESS) goto on_exit; /* Init decoder */ status = pjmedia_vid_codec_mgr_alloc_codec(NULL, codec_info2, &play_decoder); if (status != PJ_SUCCESS) goto on_exit; status = play_decoder->op->init(play_decoder, pool); if (status != PJ_SUCCESS) goto on_exit; /* Open decoder */ status = pjmedia_vid_codec_mgr_get_default_param(NULL, codec_info2, &codec_param2); if (status != PJ_SUCCESS) goto on_exit; codec_param2.dir = PJMEDIA_DIR_DECODING; status = play_decoder->op->open(play_decoder, &codec_param2); if (status != PJ_SUCCESS) goto on_exit; /* Get decoder format info and apply param */ dec_vfi = pjmedia_get_video_format_info(NULL, codec_info2->dec_fmt_id[0]); if (!dec_vfi || !dec_vfi->apply_fmt) { status = PJ_ENOTSUP; goto on_exit; } dec_vafp.size = file_vfd->size; (*dec_vfi->apply_fmt)(dec_vfi, &dec_vafp); /* Allocate buffer to receive decoder output */ play_file.dec_buf_size = dec_vafp.framebytes; play_file.dec_buf = pj_pool_zalloc(pool, play_file.dec_buf_size); } /* Create player clock */ clock_param.usec_interval = PJMEDIA_PTIME(&file_vfd->fps); clock_param.clock_rate = codec_info->clock_rate; status = pjmedia_clock_create2(pool, &clock_param, PJMEDIA_CLOCK_NO_HIGHEST_PRIO, &clock_cb, &play_file, &play_clock); if (status != PJ_SUCCESS) goto on_exit; /* Override stream codec param for encoding direction */ codec_param.enc_fmt.det.vid.size = file_vfd->size; codec_param.enc_fmt.det.vid.fps = file_vfd->fps; } else { pjmedia_vid_port_param_default(&vpp); /* Set as active for all video devices */ vpp.active = PJ_TRUE; /* Create video device port. 
*/ if (dir & PJMEDIA_DIR_ENCODING) { /* Create capture */ status = pjmedia_vid_dev_default_param( pool, PJMEDIA_VID_DEFAULT_CAPTURE_DEV, &vpp.vidparam); if (status != PJ_SUCCESS) goto on_exit; pjmedia_format_copy(&vpp.vidparam.fmt, &codec_param.enc_fmt); vpp.vidparam.fmt.id = codec_param.dec_fmt.id; vpp.vidparam.dir = PJMEDIA_DIR_CAPTURE; status = pjmedia_vid_port_create(pool, &vpp, &capture); if (status != PJ_SUCCESS) goto on_exit; } if (dir & PJMEDIA_DIR_DECODING) { /* Create renderer */ status = pjmedia_vid_dev_default_param( pool, PJMEDIA_VID_DEFAULT_RENDER_DEV, &vpp.vidparam); if (status != PJ_SUCCESS) goto on_exit; pjmedia_format_copy(&vpp.vidparam.fmt, &codec_param.dec_fmt); vpp.vidparam.dir = PJMEDIA_DIR_RENDER; vpp.vidparam.disp_size = vpp.vidparam.fmt.det.vid.size; vpp.vidparam.flags |= PJMEDIA_VID_DEV_CAP_OUTPUT_WINDOW_FLAGS; vpp.vidparam.window_flags = PJMEDIA_VID_DEV_WND_BORDER | PJMEDIA_VID_DEV_WND_RESIZABLE; status = pjmedia_vid_port_create(pool, &vpp, &renderer); if (status != PJ_SUCCESS) goto on_exit; } } /* Set to ignore fmtp */ codec_param.ignore_fmtp = PJ_TRUE; /* Create stream based on program arguments */ status = create_stream(pool, med_endpt, codec_info, &codec_param, dir, rx_pt, tx_pt, local_port, &remote_addr, #if defined(PJMEDIA_HAS_SRTP) && (PJMEDIA_HAS_SRTP != 0) use_srtp, &srtp_crypto_suite, &srtp_tx_key, &srtp_rx_key, #endif &stream); if (status != PJ_SUCCESS) goto on_exit; /* Get the port interface of the stream */ status = pjmedia_vid_stream_get_port(stream, PJMEDIA_DIR_ENCODING, &enc_port); PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1); status = pjmedia_vid_stream_get_port(stream, PJMEDIA_DIR_DECODING, &dec_port); PJ_ASSERT_RETURN(status == PJ_SUCCESS, 1); /* Start streaming */ status = pjmedia_vid_stream_start(stream); if (status != PJ_SUCCESS) goto on_exit; /* Start renderer */ if (renderer) { status = pjmedia_vid_port_connect(renderer, dec_port, PJ_FALSE); if (status != PJ_SUCCESS) goto on_exit; status = 
pjmedia_vid_port_start(renderer); if (status != PJ_SUCCESS) goto on_exit; } /* Start capture */ if (capture) { status = pjmedia_vid_port_connect(capture, enc_port, PJ_FALSE); if (status != PJ_SUCCESS) goto on_exit; status = pjmedia_vid_port_start(capture); if (status != PJ_SUCCESS) goto on_exit; } /* Start playing file */ if (play_file.file_name) { #if HAS_LOCAL_RENDERER_FOR_PLAY_FILE /* Create local renderer */ pjmedia_vid_port_param_default(&vpp); vpp.active = PJ_FALSE; status = pjmedia_vid_dev_default_param( pool, PJMEDIA_VID_DEFAULT_RENDER_DEV, &vpp.vidparam); if (status != PJ_SUCCESS) goto on_exit; vpp.vidparam.dir = PJMEDIA_DIR_RENDER; pjmedia_format_copy(&vpp.vidparam.fmt, &codec_param.dec_fmt); vpp.vidparam.fmt.det.vid.size = play_port->info.fmt.det.vid.size; vpp.vidparam.fmt.det.vid.fps = play_port->info.fmt.det.vid.fps; vpp.vidparam.disp_size = vpp.vidparam.fmt.det.vid.size; vpp.vidparam.flags |= PJMEDIA_VID_DEV_CAP_OUTPUT_WINDOW_FLAGS; vpp.vidparam.window_flags = PJMEDIA_VID_DEV_WND_BORDER | PJMEDIA_VID_DEV_WND_RESIZABLE; status = pjmedia_vid_port_create(pool, &vpp, &renderer); if (status != PJ_SUCCESS) goto on_exit; status = pjmedia_vid_port_start(renderer); if (status != PJ_SUCCESS) goto on_exit; #endif /* Init play file data */ play_file.play_port = play_port; play_file.stream_port = enc_port; play_file.decoder = play_decoder; if (renderer) { play_file.renderer = pjmedia_vid_port_get_passive_port(renderer); } status = pjmedia_clock_start(play_clock); if (status != PJ_SUCCESS) goto on_exit; } /* Done */ if (dir == PJMEDIA_DIR_DECODING) printf("Stream is active, dir is recv-only, local port is %d\n", local_port); else if (dir == PJMEDIA_DIR_ENCODING) printf("Stream is active, dir is send-only, sending to %s:%d\n", pj_inet_ntoa(remote_addr.sin_addr), pj_ntohs(remote_addr.sin_port)); else printf("Stream is active, send/recv, local port is %d, " "sending to %s:%d\n", local_port, pj_inet_ntoa(remote_addr.sin_addr), pj_ntohs(remote_addr.sin_port)); if (dir & 
PJMEDIA_DIR_ENCODING) PJ_LOG(2, (THIS_FILE, "Sending %dx%d %.*s @%.2ffps", codec_param.enc_fmt.det.vid.size.w, codec_param.enc_fmt.det.vid.size.h, codec_info->encoding_name.slen, codec_info->encoding_name.ptr, (1.0*codec_param.enc_fmt.det.vid.fps.num/ codec_param.enc_fmt.det.vid.fps.denum))); for (;;) { char tmp[10]; puts(""); puts("Commands:"); puts(" q Quit"); puts(""); printf("Command: "); fflush(stdout); if (fgets(tmp, sizeof(tmp), stdin) == NULL) { puts("EOF while reading stdin, will quit now.."); break; } if (tmp[0] == 'q') break; } /* Start deinitialization: */ on_exit: /* Stop video devices */ if (capture) pjmedia_vid_port_stop(capture); if (renderer) pjmedia_vid_port_stop(renderer); /* Stop and destroy file clock */ if (play_clock) { pjmedia_clock_stop(play_clock); pjmedia_clock_destroy(play_clock); } /* Destroy file reader/player */ if (play_port) pjmedia_port_destroy(play_port); /* Destroy file decoder */ if (play_decoder) { play_decoder->op->close(play_decoder); pjmedia_vid_codec_mgr_dealloc_codec(NULL, play_decoder); } /* Destroy video devices */ if (capture) pjmedia_vid_port_destroy(capture); if (renderer) pjmedia_vid_port_destroy(renderer); /* Destroy stream */ if (stream) { pjmedia_transport *tp; tp = pjmedia_vid_stream_get_transport(stream); pjmedia_vid_stream_destroy(stream); pjmedia_transport_close(tp); } /* Deinit codecs */ deinit_codecs(); /* Shutdown video subsystem */ pjmedia_vid_dev_subsys_shutdown(); /* Destroy event manager */ pjmedia_event_mgr_destroy(NULL); /* Release application pool */ pj_pool_release( pool ); /* Destroy media endpoint. */ pjmedia_endpt_destroy( med_endpt ); /* Destroy pool factory */ pj_caching_pool_destroy( &cp ); /* Shutdown PJLIB */ pj_shutdown(); return (status == PJ_SUCCESS) ? 0 : 1; } #else int main(int argc, char *argv[]) { PJ_UNUSED_ARG(argc); PJ_UNUSED_ARG(argv); puts("Error: this sample requires video capability (PJMEDIA_HAS_VIDEO == 1)"); return -1; } #endif /* PJMEDIA_HAS_VIDEO */
gpl-2.0
jackie87/FAST.2.2
GSL/gsl-1.15/specfunc/hyperg_2F1.c
22
28802
/* specfunc/hyperg_2F1.c * * Copyright (C) 1996, 1997, 1998, 1999, 2000, 2004 Gerard Jungman * Copyright (C) 2009 Brian Gough * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Author: G. Jungman */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_sf_exp.h> #include <gsl/gsl_sf_pow_int.h> #include <gsl/gsl_sf_gamma.h> #include <gsl/gsl_sf_psi.h> #include <gsl/gsl_sf_hyperg.h> #include "error.h" #define locEPS (1000.0*GSL_DBL_EPSILON) /* Assumes c != negative integer. */ static int hyperg_2F1_series(const double a, const double b, const double c, const double x, gsl_sf_result * result ) { double sum_pos = 1.0; double sum_neg = 0.0; double del_pos = 1.0; double del_neg = 0.0; double del = 1.0; double k = 0.0; int i = 0; if(fabs(c) < GSL_DBL_EPSILON) { result->val = 0.0; /* FIXME: ?? 
*/ result->err = 1.0; GSL_ERROR ("error", GSL_EDOM); } do { if(++i > 30000) { result->val = sum_pos - sum_neg; result->err = del_pos + del_neg; result->err += 2.0 * GSL_DBL_EPSILON * (sum_pos + sum_neg); result->err += 2.0 * GSL_DBL_EPSILON * (2.0*sqrt(k)+1.0) * fabs(result->val); GSL_ERROR ("error", GSL_EMAXITER); } del *= (a+k)*(b+k) * x / ((c+k) * (k+1.0)); /* Gauss series */ if(del > 0.0) { del_pos = del; sum_pos += del; } else if(del == 0.0) { /* Exact termination (a or b was a negative integer). */ del_pos = 0.0; del_neg = 0.0; break; } else { del_neg = -del; sum_neg -= del; } k += 1.0; } while(fabs((del_pos + del_neg)/(sum_pos-sum_neg)) > GSL_DBL_EPSILON); result->val = sum_pos - sum_neg; result->err = del_pos + del_neg; result->err += 2.0 * GSL_DBL_EPSILON * (sum_pos + sum_neg); result->err += 2.0 * GSL_DBL_EPSILON * (2.0*sqrt(k) + 1.0) * fabs(result->val); return GSL_SUCCESS; } /* a = aR + i aI, b = aR - i aI */ static int hyperg_2F1_conj_series(const double aR, const double aI, const double c, double x, gsl_sf_result * result) { if(c == 0.0) { result->val = 0.0; /* FIXME: should be Inf */ result->err = 0.0; GSL_ERROR ("error", GSL_EDOM); } else { double sum_pos = 1.0; double sum_neg = 0.0; double del_pos = 1.0; double del_neg = 0.0; double del = 1.0; double k = 0.0; do { del *= ((aR+k)*(aR+k) + aI*aI)/((k+1.0)*(c+k)) * x; if(del >= 0.0) { del_pos = del; sum_pos += del; } else { del_neg = -del; sum_neg -= del; } if(k > 30000) { result->val = sum_pos - sum_neg; result->err = del_pos + del_neg; result->err += 2.0 * GSL_DBL_EPSILON * (sum_pos + sum_neg); result->err += 2.0 * GSL_DBL_EPSILON * (2.0*sqrt(k)+1.0) * fabs(result->val); GSL_ERROR ("error", GSL_EMAXITER); } k += 1.0; } while(fabs((del_pos + del_neg)/(sum_pos - sum_neg)) > GSL_DBL_EPSILON); result->val = sum_pos - sum_neg; result->err = del_pos + del_neg; result->err += 2.0 * GSL_DBL_EPSILON * (sum_pos + sum_neg); result->err += 2.0 * GSL_DBL_EPSILON * (2.0*sqrt(k) + 1.0) * fabs(result->val); return 
GSL_SUCCESS; } } /* Luke's rational approximation. The most accesible * discussion is in [Kolbig, CPC 23, 51 (1981)]. * The convergence is supposedly guaranteed for x < 0. * You have to read Luke's books to see this and other * results. Unfortunately, the stability is not so * clear to me, although it seems very efficient when * it works. */ static int hyperg_2F1_luke(const double a, const double b, const double c, const double xin, gsl_sf_result * result) { int stat_iter; const double RECUR_BIG = 1.0e+50; const int nmax = 20000; int n = 3; const double x = -xin; const double x3 = x*x*x; const double t0 = a*b/c; const double t1 = (a+1.0)*(b+1.0)/(2.0*c); const double t2 = (a+2.0)*(b+2.0)/(2.0*(c+1.0)); double F = 1.0; double prec; double Bnm3 = 1.0; /* B0 */ double Bnm2 = 1.0 + t1 * x; /* B1 */ double Bnm1 = 1.0 + t2 * x * (1.0 + t1/3.0 * x); /* B2 */ double Anm3 = 1.0; /* A0 */ double Anm2 = Bnm2 - t0 * x; /* A1 */ double Anm1 = Bnm1 - t0*(1.0 + t2*x)*x + t0 * t1 * (c/(c+1.0)) * x*x; /* A2 */ while(1) { double npam1 = n + a - 1; double npbm1 = n + b - 1; double npcm1 = n + c - 1; double npam2 = n + a - 2; double npbm2 = n + b - 2; double npcm2 = n + c - 2; double tnm1 = 2*n - 1; double tnm3 = 2*n - 3; double tnm5 = 2*n - 5; double n2 = n*n; double F1 = (3.0*n2 + (a+b-6)*n + 2 - a*b - 2*(a+b)) / (2*tnm3*npcm1); double F2 = -(3.0*n2 - (a+b+6)*n + 2 - a*b)*npam1*npbm1/(4*tnm1*tnm3*npcm2*npcm1); double F3 = (npam2*npam1*npbm2*npbm1*(n-a-2)*(n-b-2)) / (8*tnm3*tnm3*tnm5*(n+c-3)*npcm2*npcm1); double E = -npam1*npbm1*(n-c-1) / (2*tnm3*npcm2*npcm1); double An = (1.0+F1*x)*Anm1 + (E + F2*x)*x*Anm2 + F3*x3*Anm3; double Bn = (1.0+F1*x)*Bnm1 + (E + F2*x)*x*Bnm2 + F3*x3*Bnm3; double r = An/Bn; prec = fabs((F - r)/F); F = r; if(prec < GSL_DBL_EPSILON || n > nmax) break; if(fabs(An) > RECUR_BIG || fabs(Bn) > RECUR_BIG) { An /= RECUR_BIG; Bn /= RECUR_BIG; Anm1 /= RECUR_BIG; Bnm1 /= RECUR_BIG; Anm2 /= RECUR_BIG; Bnm2 /= RECUR_BIG; Anm3 /= RECUR_BIG; Bnm3 /= RECUR_BIG; } else 
if(fabs(An) < 1.0/RECUR_BIG || fabs(Bn) < 1.0/RECUR_BIG) { An *= RECUR_BIG; Bn *= RECUR_BIG; Anm1 *= RECUR_BIG; Bnm1 *= RECUR_BIG; Anm2 *= RECUR_BIG; Bnm2 *= RECUR_BIG; Anm3 *= RECUR_BIG; Bnm3 *= RECUR_BIG; } n++; Bnm3 = Bnm2; Bnm2 = Bnm1; Bnm1 = Bn; Anm3 = Anm2; Anm2 = Anm1; Anm1 = An; } result->val = F; result->err = 2.0 * fabs(prec * F); result->err += 2.0 * GSL_DBL_EPSILON * (n+1.0) * fabs(F); /* FIXME: just a hack: there's a lot of shit going on here */ result->err *= 8.0 * (fabs(a) + fabs(b) + 1.0); stat_iter = (n >= nmax ? GSL_EMAXITER : GSL_SUCCESS ); return stat_iter; } /* Luke's rational approximation for the * case a = aR + i aI, b = aR - i aI. */ static int hyperg_2F1_conj_luke(const double aR, const double aI, const double c, const double xin, gsl_sf_result * result) { int stat_iter; const double RECUR_BIG = 1.0e+50; const int nmax = 10000; int n = 3; const double x = -xin; const double x3 = x*x*x; const double atimesb = aR*aR + aI*aI; const double apb = 2.0*aR; const double t0 = atimesb/c; const double t1 = (atimesb + apb + 1.0)/(2.0*c); const double t2 = (atimesb + 2.0*apb + 4.0)/(2.0*(c+1.0)); double F = 1.0; double prec; double Bnm3 = 1.0; /* B0 */ double Bnm2 = 1.0 + t1 * x; /* B1 */ double Bnm1 = 1.0 + t2 * x * (1.0 + t1/3.0 * x); /* B2 */ double Anm3 = 1.0; /* A0 */ double Anm2 = Bnm2 - t0 * x; /* A1 */ double Anm1 = Bnm1 - t0*(1.0 + t2*x)*x + t0 * t1 * (c/(c+1.0)) * x*x; /* A2 */ while(1) { double nm1 = n - 1; double nm2 = n - 2; double npam1_npbm1 = atimesb + nm1*apb + nm1*nm1; double npam2_npbm2 = atimesb + nm2*apb + nm2*nm2; double npcm1 = nm1 + c; double npcm2 = nm2 + c; double tnm1 = 2*n - 1; double tnm3 = 2*n - 3; double tnm5 = 2*n - 5; double n2 = n*n; double F1 = (3.0*n2 + (apb-6)*n + 2 - atimesb - 2*apb) / (2*tnm3*npcm1); double F2 = -(3.0*n2 - (apb+6)*n + 2 - atimesb)*npam1_npbm1/(4*tnm1*tnm3*npcm2*npcm1); double F3 = (npam2_npbm2*npam1_npbm1*(nm2*nm2 - nm2*apb + atimesb)) / (8*tnm3*tnm3*tnm5*(n+c-3)*npcm2*npcm1); double E = 
-npam1_npbm1*(n-c-1) / (2*tnm3*npcm2*npcm1); double An = (1.0+F1*x)*Anm1 + (E + F2*x)*x*Anm2 + F3*x3*Anm3; double Bn = (1.0+F1*x)*Bnm1 + (E + F2*x)*x*Bnm2 + F3*x3*Bnm3; double r = An/Bn; prec = fabs(F - r)/fabs(F); F = r; if(prec < GSL_DBL_EPSILON || n > nmax) break; if(fabs(An) > RECUR_BIG || fabs(Bn) > RECUR_BIG) { An /= RECUR_BIG; Bn /= RECUR_BIG; Anm1 /= RECUR_BIG; Bnm1 /= RECUR_BIG; Anm2 /= RECUR_BIG; Bnm2 /= RECUR_BIG; Anm3 /= RECUR_BIG; Bnm3 /= RECUR_BIG; } else if(fabs(An) < 1.0/RECUR_BIG || fabs(Bn) < 1.0/RECUR_BIG) { An *= RECUR_BIG; Bn *= RECUR_BIG; Anm1 *= RECUR_BIG; Bnm1 *= RECUR_BIG; Anm2 *= RECUR_BIG; Bnm2 *= RECUR_BIG; Anm3 *= RECUR_BIG; Bnm3 *= RECUR_BIG; } n++; Bnm3 = Bnm2; Bnm2 = Bnm1; Bnm1 = Bn; Anm3 = Anm2; Anm2 = Anm1; Anm1 = An; } result->val = F; result->err = 2.0 * fabs(prec * F); result->err += 2.0 * GSL_DBL_EPSILON * (n+1.0) * fabs(F); /* FIXME: see above */ result->err *= 8.0 * (fabs(aR) + fabs(aI) + 1.0); stat_iter = (n >= nmax ? GSL_EMAXITER : GSL_SUCCESS ); return stat_iter; } /* Do the reflection described in [Moshier, p. 334]. * Assumes a,b,c != neg integer. */ static int hyperg_2F1_reflect(const double a, const double b, const double c, const double x, gsl_sf_result * result) { const double d = c - a - b; const int intd = floor(d+0.5); const int d_integer = ( fabs(d - intd) < locEPS ); if(d_integer) { const double ln_omx = log(1.0 - x); const double ad = fabs(d); int stat_F2 = GSL_SUCCESS; double sgn_2; gsl_sf_result F1; gsl_sf_result F2; double d1, d2; gsl_sf_result lng_c; gsl_sf_result lng_ad2; gsl_sf_result lng_bd2; int stat_c; int stat_ad2; int stat_bd2; if(d >= 0.0) { d1 = d; d2 = 0.0; } else { d1 = 0.0; d2 = d; } stat_ad2 = gsl_sf_lngamma_e(a+d2, &lng_ad2); stat_bd2 = gsl_sf_lngamma_e(b+d2, &lng_bd2); stat_c = gsl_sf_lngamma_e(c, &lng_c); /* Evaluate F1. 
*/ if(ad < GSL_DBL_EPSILON) { /* d = 0 */ F1.val = 0.0; F1.err = 0.0; } else { gsl_sf_result lng_ad; gsl_sf_result lng_ad1; gsl_sf_result lng_bd1; int stat_ad = gsl_sf_lngamma_e(ad, &lng_ad); int stat_ad1 = gsl_sf_lngamma_e(a+d1, &lng_ad1); int stat_bd1 = gsl_sf_lngamma_e(b+d1, &lng_bd1); if(stat_ad1 == GSL_SUCCESS && stat_bd1 == GSL_SUCCESS && stat_ad == GSL_SUCCESS) { /* Gamma functions in the denominator are ok. * Proceed with evaluation. */ int i; double sum1 = 1.0; double term = 1.0; double ln_pre1_val = lng_ad.val + lng_c.val + d2*ln_omx - lng_ad1.val - lng_bd1.val; double ln_pre1_err = lng_ad.err + lng_c.err + lng_ad1.err + lng_bd1.err + GSL_DBL_EPSILON * fabs(ln_pre1_val); int stat_e; /* Do F1 sum. */ for(i=1; i<ad; i++) { int j = i-1; term *= (a + d2 + j) * (b + d2 + j) / (1.0 + d2 + j) / i * (1.0-x); sum1 += term; } stat_e = gsl_sf_exp_mult_err_e(ln_pre1_val, ln_pre1_err, sum1, GSL_DBL_EPSILON*fabs(sum1), &F1); if(stat_e == GSL_EOVRFLW) { OVERFLOW_ERROR(result); } } else { /* Gamma functions in the denominator were not ok. * So the F1 term is zero. */ F1.val = 0.0; F1.err = 0.0; } } /* end F1 evaluation */ /* Evaluate F2. */ if(stat_ad2 == GSL_SUCCESS && stat_bd2 == GSL_SUCCESS) { /* Gamma functions in the denominator are ok. * Proceed with evaluation. 
*/ const int maxiter = 2000; double psi_1 = -M_EULER; gsl_sf_result psi_1pd; gsl_sf_result psi_apd1; gsl_sf_result psi_bpd1; int stat_1pd = gsl_sf_psi_e(1.0 + ad, &psi_1pd); int stat_apd1 = gsl_sf_psi_e(a + d1, &psi_apd1); int stat_bpd1 = gsl_sf_psi_e(b + d1, &psi_bpd1); int stat_dall = GSL_ERROR_SELECT_3(stat_1pd, stat_apd1, stat_bpd1); double psi_val = psi_1 + psi_1pd.val - psi_apd1.val - psi_bpd1.val - ln_omx; double psi_err = psi_1pd.err + psi_apd1.err + psi_bpd1.err + GSL_DBL_EPSILON*fabs(psi_val); double fact = 1.0; double sum2_val = psi_val; double sum2_err = psi_err; double ln_pre2_val = lng_c.val + d1*ln_omx - lng_ad2.val - lng_bd2.val; double ln_pre2_err = lng_c.err + lng_ad2.err + lng_bd2.err + GSL_DBL_EPSILON * fabs(ln_pre2_val); int stat_e; int j; /* Do F2 sum. */ for(j=1; j<maxiter; j++) { /* values for psi functions use recurrence; Abramowitz+Stegun 6.3.5 */ double term1 = 1.0/(double)j + 1.0/(ad+j); double term2 = 1.0/(a+d1+j-1.0) + 1.0/(b+d1+j-1.0); double delta = 0.0; psi_val += term1 - term2; psi_err += GSL_DBL_EPSILON * (fabs(term1) + fabs(term2)); fact *= (a+d1+j-1.0)*(b+d1+j-1.0)/((ad+j)*j) * (1.0-x); delta = fact * psi_val; sum2_val += delta; sum2_err += fabs(fact * psi_err) + GSL_DBL_EPSILON*fabs(delta); if(fabs(delta) < GSL_DBL_EPSILON * fabs(sum2_val)) break; } if(j == maxiter) stat_F2 = GSL_EMAXITER; if(sum2_val == 0.0) { F2.val = 0.0; F2.err = 0.0; } else { stat_e = gsl_sf_exp_mult_err_e(ln_pre2_val, ln_pre2_err, sum2_val, sum2_err, &F2); if(stat_e == GSL_EOVRFLW) { result->val = 0.0; result->err = 0.0; GSL_ERROR ("error", GSL_EOVRFLW); } } stat_F2 = GSL_ERROR_SELECT_2(stat_F2, stat_dall); } else { /* Gamma functions in the denominator not ok. * So the F2 term is zero. */ F2.val = 0.0; F2.err = 0.0; } /* end F2 evaluation */ sgn_2 = ( GSL_IS_ODD(intd) ? -1.0 : 1.0 ); result->val = F1.val + sgn_2 * F2.val; result->err = F1.err + F2. 
err; result->err += 2.0 * GSL_DBL_EPSILON * (fabs(F1.val) + fabs(F2.val)); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); return stat_F2; } else { /* d not an integer */ gsl_sf_result pre1, pre2; double sgn1, sgn2; gsl_sf_result F1, F2; int status_F1, status_F2; /* These gamma functions appear in the denominator, so we * catch their harmless domain errors and set the terms to zero. */ gsl_sf_result ln_g1ca, ln_g1cb, ln_g2a, ln_g2b; double sgn_g1ca, sgn_g1cb, sgn_g2a, sgn_g2b; int stat_1ca = gsl_sf_lngamma_sgn_e(c-a, &ln_g1ca, &sgn_g1ca); int stat_1cb = gsl_sf_lngamma_sgn_e(c-b, &ln_g1cb, &sgn_g1cb); int stat_2a = gsl_sf_lngamma_sgn_e(a, &ln_g2a, &sgn_g2a); int stat_2b = gsl_sf_lngamma_sgn_e(b, &ln_g2b, &sgn_g2b); int ok1 = (stat_1ca == GSL_SUCCESS && stat_1cb == GSL_SUCCESS); int ok2 = (stat_2a == GSL_SUCCESS && stat_2b == GSL_SUCCESS); gsl_sf_result ln_gc, ln_gd, ln_gmd; double sgn_gc, sgn_gd, sgn_gmd; gsl_sf_lngamma_sgn_e( c, &ln_gc, &sgn_gc); gsl_sf_lngamma_sgn_e( d, &ln_gd, &sgn_gd); gsl_sf_lngamma_sgn_e(-d, &ln_gmd, &sgn_gmd); sgn1 = sgn_gc * sgn_gd * sgn_g1ca * sgn_g1cb; sgn2 = sgn_gc * sgn_gmd * sgn_g2a * sgn_g2b; if(ok1 && ok2) { double ln_pre1_val = ln_gc.val + ln_gd.val - ln_g1ca.val - ln_g1cb.val; double ln_pre2_val = ln_gc.val + ln_gmd.val - ln_g2a.val - ln_g2b.val + d*log(1.0-x); double ln_pre1_err = ln_gc.err + ln_gd.err + ln_g1ca.err + ln_g1cb.err; double ln_pre2_err = ln_gc.err + ln_gmd.err + ln_g2a.err + ln_g2b.err; if(ln_pre1_val < GSL_LOG_DBL_MAX && ln_pre2_val < GSL_LOG_DBL_MAX) { gsl_sf_exp_err_e(ln_pre1_val, ln_pre1_err, &pre1); gsl_sf_exp_err_e(ln_pre2_val, ln_pre2_err, &pre2); pre1.val *= sgn1; pre2.val *= sgn2; } else { OVERFLOW_ERROR(result); } } else if(ok1 && !ok2) { double ln_pre1_val = ln_gc.val + ln_gd.val - ln_g1ca.val - ln_g1cb.val; double ln_pre1_err = ln_gc.err + ln_gd.err + ln_g1ca.err + ln_g1cb.err; if(ln_pre1_val < GSL_LOG_DBL_MAX) { gsl_sf_exp_err_e(ln_pre1_val, ln_pre1_err, &pre1); pre1.val *= sgn1; pre2.val = 0.0; 
pre2.err = 0.0; } else { OVERFLOW_ERROR(result); } } else if(!ok1 && ok2) { double ln_pre2_val = ln_gc.val + ln_gmd.val - ln_g2a.val - ln_g2b.val + d*log(1.0-x); double ln_pre2_err = ln_gc.err + ln_gmd.err + ln_g2a.err + ln_g2b.err; if(ln_pre2_val < GSL_LOG_DBL_MAX) { pre1.val = 0.0; pre1.err = 0.0; gsl_sf_exp_err_e(ln_pre2_val, ln_pre2_err, &pre2); pre2.val *= sgn2; } else { OVERFLOW_ERROR(result); } } else { pre1.val = 0.0; pre2.val = 0.0; UNDERFLOW_ERROR(result); } status_F1 = hyperg_2F1_series( a, b, 1.0-d, 1.0-x, &F1); status_F2 = hyperg_2F1_series(c-a, c-b, 1.0+d, 1.0-x, &F2); result->val = pre1.val*F1.val + pre2.val*F2.val; result->err = fabs(pre1.val*F1.err) + fabs(pre2.val*F2.err); result->err += fabs(pre1.err*F1.val) + fabs(pre2.err*F2.val); result->err += 2.0 * GSL_DBL_EPSILON * (fabs(pre1.val*F1.val) + fabs(pre2.val*F2.val)); result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val); return GSL_SUCCESS; } } static int pow_omx(const double x, const double p, gsl_sf_result * result) { double ln_omx; double ln_result; if(fabs(x) < GSL_ROOT5_DBL_EPSILON) { ln_omx = -x*(1.0 + x*(1.0/2.0 + x*(1.0/3.0 + x/4.0 + x*x/5.0))); } else { ln_omx = log(1.0-x); } ln_result = p * ln_omx; return gsl_sf_exp_err_e(ln_result, GSL_DBL_EPSILON * fabs(ln_result), result); } /*-*-*-*-*-*-*-*-*-*-*-* Functions with Error Codes *-*-*-*-*-*-*-*-*-*-*-*/ int gsl_sf_hyperg_2F1_e(double a, double b, const double c, const double x, gsl_sf_result * result) { const double d = c - a - b; const double rinta = floor(a + 0.5); const double rintb = floor(b + 0.5); const double rintc = floor(c + 0.5); const int a_neg_integer = ( a < 0.0 && fabs(a - rinta) < locEPS ); const int b_neg_integer = ( b < 0.0 && fabs(b - rintb) < locEPS ); const int c_neg_integer = ( c < 0.0 && fabs(c - rintc) < locEPS ); result->val = 0.0; result->err = 0.0; /* Handle x == 1.0 RJM */ if (fabs (x - 1.0) < locEPS && (c - a - b) > 0 && c != 0 && !c_neg_integer) { gsl_sf_result lngamc, lngamcab, lngamca, lngamcb; double 
lngamc_sgn, lngamca_sgn, lngamcb_sgn; int status; int stat1 = gsl_sf_lngamma_sgn_e (c, &lngamc, &lngamc_sgn); int stat2 = gsl_sf_lngamma_e (c - a - b, &lngamcab); int stat3 = gsl_sf_lngamma_sgn_e (c - a, &lngamca, &lngamca_sgn); int stat4 = gsl_sf_lngamma_sgn_e (c - b, &lngamcb, &lngamcb_sgn); if (stat1 != GSL_SUCCESS || stat2 != GSL_SUCCESS || stat3 != GSL_SUCCESS || stat4 != GSL_SUCCESS) { DOMAIN_ERROR (result); } status = gsl_sf_exp_err_e (lngamc.val + lngamcab.val - lngamca.val - lngamcb.val, lngamc.err + lngamcab.err + lngamca.err + lngamcb.err, result); result->val *= lngamc_sgn / (lngamca_sgn * lngamcb_sgn); return status; } if(x < -1.0 || 1.0 <= x) { DOMAIN_ERROR(result); } if(c_neg_integer) { /* If c is a negative integer, then either a or b must be a negative integer of smaller magnitude than c to ensure cancellation of the series. */ if(! (a_neg_integer && a > c + 0.1) && ! (b_neg_integer && b > c + 0.1)) { DOMAIN_ERROR(result); } } if(fabs(c-b) < locEPS || fabs(c-a) < locEPS) { return pow_omx(x, d, result); /* (1-x)^(c-a-b) */ } if(a >= 0.0 && b >= 0.0 && c >=0.0 && x >= 0.0 && x < 0.995) { /* Series has all positive definite * terms and x is not close to 1. */ return hyperg_2F1_series(a, b, c, x, result); } if(fabs(a) < 10.0 && fabs(b) < 10.0) { /* a and b are not too large, so we attempt * variations on the series summation. */ if(a_neg_integer) { return hyperg_2F1_series(rinta, b, c, x, result); } if(b_neg_integer) { return hyperg_2F1_series(a, rintb, c, x, result); } if(x < -0.25) { return hyperg_2F1_luke(a, b, c, x, result); } else if(x < 0.5) { return hyperg_2F1_series(a, b, c, x, result); } else { if(fabs(c) > 10.0) { return hyperg_2F1_series(a, b, c, x, result); } else { return hyperg_2F1_reflect(a, b, c, x, result); } } } else { /* Either a or b or both large. * Introduce some new variables ap,bp so that bp is * the larger in magnitude. 
*/ double ap, bp; if(fabs(a) > fabs(b)) { bp = a; ap = b; } else { bp = b; ap = a; } if(x < 0.0) { /* What the hell, maybe Luke will converge. */ return hyperg_2F1_luke(a, b, c, x, result); } if(GSL_MAX_DBL(fabs(ap),1.0)*fabs(bp)*fabs(x) < 2.0*fabs(c)) { /* If c is large enough or x is small enough, * we can attempt the series anyway. */ return hyperg_2F1_series(a, b, c, x, result); } if(fabs(bp*bp*x*x) < 0.001*fabs(bp) && fabs(ap) < 10.0) { /* The famous but nearly worthless "large b" asymptotic. */ int stat = gsl_sf_hyperg_1F1_e(ap, c, bp*x, result); result->err = 0.001 * fabs(result->val); return stat; } /* We give up. */ result->val = 0.0; result->err = 0.0; GSL_ERROR ("error", GSL_EUNIMPL); } } int gsl_sf_hyperg_2F1_conj_e(const double aR, const double aI, const double c, const double x, gsl_sf_result * result) { const double ax = fabs(x); const double rintc = floor(c + 0.5); const int c_neg_integer = ( c < 0.0 && fabs(c - rintc) < locEPS ); result->val = 0.0; result->err = 0.0; if(ax >= 1.0 || c_neg_integer || c == 0.0) { DOMAIN_ERROR(result); } if( (ax < 0.25 && fabs(aR) < 20.0 && fabs(aI) < 20.0) || (c > 0.0 && x > 0.0) ) { return hyperg_2F1_conj_series(aR, aI, c, x, result); } else if(fabs(aR) < 10.0 && fabs(aI) < 10.0) { if(x < -0.25) { return hyperg_2F1_conj_luke(aR, aI, c, x, result); } else { return hyperg_2F1_conj_series(aR, aI, c, x, result); } } else { if(x < 0.0) { /* What the hell, maybe Luke will converge. */ return hyperg_2F1_conj_luke(aR, aI, c, x, result); } /* Give up. 
*/ result->val = 0.0; result->err = 0.0; GSL_ERROR ("error", GSL_EUNIMPL); } } int gsl_sf_hyperg_2F1_renorm_e(const double a, const double b, const double c, const double x, gsl_sf_result * result ) { const double rinta = floor(a + 0.5); const double rintb = floor(b + 0.5); const double rintc = floor(c + 0.5); const int a_neg_integer = ( a < 0.0 && fabs(a - rinta) < locEPS ); const int b_neg_integer = ( b < 0.0 && fabs(b - rintb) < locEPS ); const int c_neg_integer = ( c < 0.0 && fabs(c - rintc) < locEPS ); if(c_neg_integer) { if((a_neg_integer && a > c+0.1) || (b_neg_integer && b > c+0.1)) { /* 2F1 terminates early */ result->val = 0.0; result->err = 0.0; return GSL_SUCCESS; } else { /* 2F1 does not terminate early enough, so something survives */ /* [Abramowitz+Stegun, 15.1.2] */ gsl_sf_result g1, g2, g3, g4, g5; double s1, s2, s3, s4, s5; int stat = 0; stat += gsl_sf_lngamma_sgn_e(a-c+1, &g1, &s1); stat += gsl_sf_lngamma_sgn_e(b-c+1, &g2, &s2); stat += gsl_sf_lngamma_sgn_e(a, &g3, &s3); stat += gsl_sf_lngamma_sgn_e(b, &g4, &s4); stat += gsl_sf_lngamma_sgn_e(-c+2, &g5, &s5); if(stat != 0) { DOMAIN_ERROR(result); } else { gsl_sf_result F; int stat_F = gsl_sf_hyperg_2F1_e(a-c+1, b-c+1, -c+2, x, &F); double ln_pre_val = g1.val + g2.val - g3.val - g4.val - g5.val; double ln_pre_err = g1.err + g2.err + g3.err + g4.err + g5.err; double sg = s1 * s2 * s3 * s4 * s5; int stat_e = gsl_sf_exp_mult_err_e(ln_pre_val, ln_pre_err, sg * F.val, F.err, result); return GSL_ERROR_SELECT_2(stat_e, stat_F); } } } else { /* generic c */ gsl_sf_result F; gsl_sf_result lng; double sgn; int stat_g = gsl_sf_lngamma_sgn_e(c, &lng, &sgn); int stat_F = gsl_sf_hyperg_2F1_e(a, b, c, x, &F); int stat_e = gsl_sf_exp_mult_err_e(-lng.val, lng.err, sgn*F.val, F.err, result); return GSL_ERROR_SELECT_3(stat_e, stat_F, stat_g); } } int gsl_sf_hyperg_2F1_conj_renorm_e(const double aR, const double aI, const double c, const double x, gsl_sf_result * result ) { const double rintc = floor(c + 0.5); const 
double rinta = floor(aR + 0.5); const int a_neg_integer = ( aR < 0.0 && fabs(aR-rinta) < locEPS && aI == 0.0); const int c_neg_integer = ( c < 0.0 && fabs(c - rintc) < locEPS ); if(c_neg_integer) { if(a_neg_integer && aR > c+0.1) { /* 2F1 terminates early */ result->val = 0.0; result->err = 0.0; return GSL_SUCCESS; } else { /* 2F1 does not terminate early enough, so something survives */ /* [Abramowitz+Stegun, 15.1.2] */ gsl_sf_result g1, g2; gsl_sf_result g3; gsl_sf_result a1, a2; int stat = 0; stat += gsl_sf_lngamma_complex_e(aR-c+1, aI, &g1, &a1); stat += gsl_sf_lngamma_complex_e(aR, aI, &g2, &a2); stat += gsl_sf_lngamma_e(-c+2.0, &g3); if(stat != 0) { DOMAIN_ERROR(result); } else { gsl_sf_result F; int stat_F = gsl_sf_hyperg_2F1_conj_e(aR-c+1, aI, -c+2, x, &F); double ln_pre_val = 2.0*(g1.val - g2.val) - g3.val; double ln_pre_err = 2.0 * (g1.err + g2.err) + g3.err; int stat_e = gsl_sf_exp_mult_err_e(ln_pre_val, ln_pre_err, F.val, F.err, result); return GSL_ERROR_SELECT_2(stat_e, stat_F); } } } else { /* generic c */ gsl_sf_result F; gsl_sf_result lng; double sgn; int stat_g = gsl_sf_lngamma_sgn_e(c, &lng, &sgn); int stat_F = gsl_sf_hyperg_2F1_conj_e(aR, aI, c, x, &F); int stat_e = gsl_sf_exp_mult_err_e(-lng.val, lng.err, sgn*F.val, F.err, result); return GSL_ERROR_SELECT_3(stat_e, stat_F, stat_g); } } /*-*-*-*-*-*-*-*-*-* Functions w/ Natural Prototypes *-*-*-*-*-*-*-*-*-*-*/ #include "eval.h" double gsl_sf_hyperg_2F1(double a, double b, double c, double x) { EVAL_RESULT(gsl_sf_hyperg_2F1_e(a, b, c, x, &result)); } double gsl_sf_hyperg_2F1_conj(double aR, double aI, double c, double x) { EVAL_RESULT(gsl_sf_hyperg_2F1_conj_e(aR, aI, c, x, &result)); } double gsl_sf_hyperg_2F1_renorm(double a, double b, double c, double x) { EVAL_RESULT(gsl_sf_hyperg_2F1_renorm_e(a, b, c, x, &result)); } double gsl_sf_hyperg_2F1_conj_renorm(double aR, double aI, double c, double x) { EVAL_RESULT(gsl_sf_hyperg_2F1_conj_renorm_e(aR, aI, c, x, &result)); }
gpl-2.0
purity-devices/android_kernel_lge_g3
drivers/leds/leds-qpnp.c
22
136945
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/spmi.h> #include <linux/qpnp/pwm.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #include <linux/delay.h> #ifdef CONFIG_MACH_LGE #include <mach/board_lge.h> #endif #ifdef CONFIG_LEDS_PM8941_EMOTIONAL #include "leds-qpnp.h" #endif #define WLED_MOD_EN_REG(base, n) (base + 0x60 + n*0x10) #define WLED_IDAC_DLY_REG(base, n) (WLED_MOD_EN_REG(base, n) + 0x01) #define WLED_FULL_SCALE_REG(base, n) (WLED_IDAC_DLY_REG(base, n) + 0x01) #define WLED_MOD_SRC_SEL_REG(base, n) (WLED_FULL_SCALE_REG(base, n) + 0x01) /* wled control registers */ #define WLED_BRIGHTNESS_CNTL_LSB(base, n) (base + 0x40 + 2*n) #define WLED_BRIGHTNESS_CNTL_MSB(base, n) (base + 0x41 + 2*n) #define WLED_MOD_CTRL_REG(base) (base + 0x46) #define WLED_SYNC_REG(base) (base + 0x47) #define WLED_FDBCK_CTRL_REG(base) (base + 0x48) #define WLED_SWITCHING_FREQ_REG(base) (base + 0x4C) #define WLED_OVP_CFG_REG(base) (base + 0x4D) #define WLED_BOOST_LIMIT_REG(base) (base + 0x4E) #define WLED_CURR_SINK_REG(base) (base + 0x4F) #define WLED_HIGH_POLE_CAP_REG(base) (base + 0x58) #define WLED_CURR_SINK_MASK 0xE0 #define WLED_CURR_SINK_SHFT 0x05 #define WLED_DISABLE_ALL_SINKS 0x00 #define WLED_SWITCH_FREQ_MASK 0x0F 
#define WLED_OVP_VAL_MASK 0x03 #define WLED_OVP_VAL_BIT_SHFT 0x00 #define WLED_BOOST_LIMIT_MASK 0x07 #define WLED_BOOST_LIMIT_BIT_SHFT 0x00 #define WLED_BOOST_ON 0x80 #define WLED_BOOST_OFF 0x00 #define WLED_EN_MASK 0x80 #define WLED_NO_MASK 0x00 #define WLED_CP_SELECT_MAX 0x03 #define WLED_CP_SELECT_MASK 0x02 #define WLED_USE_EXT_GEN_MOD_SRC 0x01 #define WLED_CTL_DLY_STEP 200 #define WLED_CTL_DLY_MAX 1400 #define WLED_MAX_CURR 25 #define WLED_NO_CURRENT 0x00 #define WLED_OVP_DELAY 1000 #define WLED_MSB_MASK 0x0F #define WLED_MAX_CURR_MASK 0x1F #define WLED_OP_FDBCK_MASK 0x07 #define WLED_OP_FDBCK_BIT_SHFT 0x00 #define WLED_OP_FDBCK_DEFAULT 0x00 #define WLED_MAX_LEVEL 4095 #define WLED_8_BIT_MASK 0xFF #define WLED_4_BIT_MASK 0x0F #define WLED_8_BIT_SHFT 0x08 #define WLED_MAX_DUTY_CYCLE 0xFFF #define WLED_SYNC_VAL 0x07 #define WLED_SYNC_RESET_VAL 0x00 #define PMIC_VER_8026 0x04 #define PMIC_VERSION_REG 0x0105 #define WLED_DEFAULT_STRINGS 0x01 #define WLED_DEFAULT_OVP_VAL 0x02 #define WLED_BOOST_LIM_DEFAULT 0x03 #define WLED_CP_SEL_DEFAULT 0x00 #define WLED_CTRL_DLY_DEFAULT 0x00 #define WLED_SWITCH_FREQ_DEFAULT 0x0B #define FLASH_SAFETY_TIMER(base) (base + 0x40) #define FLASH_MAX_CURR(base) (base + 0x41) #define FLASH_LED_0_CURR(base) (base + 0x42) #define FLASH_LED_1_CURR(base) (base + 0x43) #define FLASH_CLAMP_CURR(base) (base + 0x44) #define FLASH_LED_TMR_CTRL(base) (base + 0x48) #define FLASH_HEADROOM(base) (base + 0x4A) #define FLASH_STARTUP_DELAY(base) (base + 0x4B) #define FLASH_MASK_ENABLE(base) (base + 0x4C) #define FLASH_VREG_OK_FORCE(base) (base + 0x4F) #define FLASH_ENABLE_CONTROL(base) (base + 0x46) #define FLASH_LED_STROBE_CTRL(base) (base + 0x47) #define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0) #define FLASH_LED_TORCH(base) (base + 0xE4) #define FLASH_FAULT_DETECT(base) (base + 0x51) #define FLASH_PERIPHERAL_SUBTYPE(base) (base + 0x05) #define FLASH_CURRENT_RAMP(base) (base + 0x54) #ifdef CONFIG_MACH_LGE #define FLASH_VPH_PWR_DROOP(base) (base + 
0x5A) #endif #define FLASH_MAX_LEVEL 0x4F #define TORCH_MAX_LEVEL 0x0F #define FLASH_NO_MASK 0x00 #define FLASH_MASK_1 0x20 #define FLASH_MASK_REG_MASK 0xE0 #define FLASH_HEADROOM_MASK 0x03 #define FLASH_SAFETY_TIMER_MASK 0x7F #define FLASH_CURRENT_MASK 0xFF #define FLASH_MAX_CURRENT_MASK 0x7F #define FLASH_TMR_MASK 0x03 #define FLASH_TMR_WATCHDOG 0x03 #define FLASH_TMR_SAFETY 0x00 #define FLASH_FAULT_DETECT_MASK 0X80 #define FLASH_HW_VREG_OK 0x40 #define FLASH_VREG_MASK 0xC0 #define FLASH_STARTUP_DLY_MASK 0x02 #define FLASH_CURRENT_RAMP_MASK 0xBF #ifdef CONFIG_MACH_LGE #define FLASH_VPH_PWR_DROOP_MASK 0xF3 #endif #define FLASH_ENABLE_ALL 0xE0 #define FLASH_ENABLE_MODULE 0x80 #define FLASH_ENABLE_MODULE_MASK 0x80 #define FLASH_DISABLE_ALL 0x00 #define FLASH_ENABLE_MASK 0xE0 #define FLASH_ENABLE_LED_0 0xC0 #define FLASH_ENABLE_LED_1 0xA0 #define FLASH_INIT_MASK 0xE0 #define FLASH_SELFCHECK_ENABLE 0x80 #define FLASH_RAMP_STEP_27US 0xBF #define FLASH_HW_SW_STROBE_SEL_MASK 0x04 #define FLASH_STROBE_MASK 0xC7 #define FLASH_LED_0_OUTPUT 0x80 #define FLASH_LED_1_OUTPUT 0x40 #define FLASH_TORCH_OUTPUT 0xC0 #define FLASH_CURRENT_PRGM_MIN 1 #define FLASH_CURRENT_PRGM_SHIFT 1 #define FLASH_CURRENT_MAX 0x4F #define FLASH_CURRENT_TORCH 0x07 #define FLASH_DURATION_200ms 0x13 #define FLASH_CLAMP_200mA 0x0F #define FLASH_TORCH_MASK 0x03 #define FLASH_LED_TORCH_ENABLE 0x00 #define FLASH_LED_TORCH_DISABLE 0x03 #define FLASH_UNLOCK_SECURE 0xA5 #define FLASH_SECURE_MASK 0xFF #define FLASH_SUBTYPE_DUAL 0x01 #define FLASH_SUBTYPE_SINGLE 0x02 #define FLASH_RAMP_UP_DELAY_US 1000 #define FLASH_RAMP_DN_DELAY_US 2160 #define LED_TRIGGER_DEFAULT "none" #define RGB_LED_SRC_SEL(base) (base + 0x45) #define RGB_LED_EN_CTL(base) (base + 0x46) #define RGB_LED_ATC_CTL(base) (base + 0x47) #define RGB_MAX_LEVEL LED_FULL #define RGB_LED_ENABLE_RED 0x80 #define RGB_LED_ENABLE_GREEN 0x40 #define RGB_LED_ENABLE_BLUE 0x20 #define RGB_LED_SOURCE_VPH_PWR 0x01 #define RGB_LED_ENABLE_MASK 0xE0 #define 
RGB_LED_SRC_MASK 0x03 #define QPNP_LED_PWM_FLAGS (PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP) #define QPNP_LUT_RAMP_STEP_DEFAULT 255 #define PWM_LUT_MAX_SIZE 63 #define PWM_GPLED_LUT_MAX_SIZE 31 #define RGB_LED_DISABLE 0x00 #define MPP_MAX_LEVEL LED_FULL #define LED_MPP_MODE_CTRL(base) (base + 0x40) #define LED_MPP_VIN_CTRL(base) (base + 0x41) #define LED_MPP_EN_CTRL(base) (base + 0x46) #define LED_MPP_SINK_CTRL(base) (base + 0x4C) #define LED_MPP_CURRENT_MIN 5 #define LED_MPP_CURRENT_MAX 40 #define LED_MPP_VIN_CTRL_DEFAULT 0 #define LED_MPP_CURRENT_PER_SETTING 5 #define LED_MPP_SOURCE_SEL_DEFAULT LED_MPP_MODE_ENABLE #define LED_MPP_SINK_MASK 0x07 #define LED_MPP_MODE_MASK 0x7F #define LED_MPP_VIN_MASK 0x03 #define LED_MPP_EN_MASK 0x80 #define LED_MPP_SRC_MASK 0x0F #define LED_MPP_MODE_CTRL_MASK 0x70 #define LED_MPP_MODE_SINK (0x06 << 4) #define LED_MPP_MODE_ENABLE 0x01 #define LED_MPP_MODE_OUTPUT 0x10 #define LED_MPP_MODE_DISABLE 0x00 #define LED_MPP_EN_ENABLE 0x80 #define LED_MPP_EN_DISABLE 0x00 #define MPP_SOURCE_DTEST1 0x08 #define KPDBL_MAX_LEVEL LED_FULL #define KPDBL_ROW_SRC_SEL(base) (base + 0x40) #define KPDBL_ENABLE(base) (base + 0x46) #define KPDBL_ROW_SRC(base) (base + 0xE5) #define KPDBL_ROW_SRC_SEL_VAL_MASK 0x0F #define KPDBL_ROW_SCAN_EN_MASK 0x80 #define KPDBL_ROW_SCAN_VAL_MASK 0x0F #define KPDBL_ROW_SCAN_EN_SHIFT 7 #define KPDBL_MODULE_EN 0x80 #define KPDBL_MODULE_DIS 0x00 #define KPDBL_MODULE_EN_MASK 0x80 #define NUM_KPDBL_LEDS 4 #define KPDBL_MASTER_BIT_INDEX 0 #ifdef CONFIG_LEDS_PM8941_EMOTIONAL /* LGE RGB brightness tunning factors */ #define BRIGHTNESS_TUNNING 40 / 255 #endif /** * enum qpnp_leds - QPNP supported led ids * @QPNP_ID_WLED - White led backlight */ enum qpnp_leds { QPNP_ID_WLED = 0, QPNP_ID_FLASH1_LED0, QPNP_ID_FLASH1_LED1, QPNP_ID_RGB_RED, QPNP_ID_RGB_GREEN, QPNP_ID_RGB_BLUE, QPNP_ID_LED_MPP, QPNP_ID_KPDBL, QPNP_ID_MAX, }; /* current boost limit */ enum wled_current_boost_limit { WLED_CURR_LIMIT_105mA, WLED_CURR_LIMIT_385mA, 
WLED_CURR_LIMIT_525mA, WLED_CURR_LIMIT_805mA, WLED_CURR_LIMIT_980mA, WLED_CURR_LIMIT_1260mA, WLED_CURR_LIMIT_1400mA, WLED_CURR_LIMIT_1680mA, }; /* over voltage protection threshold */ enum wled_ovp_threshold { WLED_OVP_35V, WLED_OVP_32V, WLED_OVP_29V, WLED_OVP_27V, }; enum flash_headroom { HEADROOM_250mV = 0, HEADROOM_300mV, HEADROOM_400mV, HEADROOM_500mV, }; enum flash_startup_dly { DELAY_10us = 0, DELAY_32us, DELAY_64us, DELAY_128us, }; enum led_mode { PWM_MODE = 0, LPG_MODE, MANUAL_MODE, }; static u8 wled_debug_regs[] = { /* common registers */ 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, /* LED1 */ 0x60, 0x61, 0x62, 0x63, 0x66, /* LED2 */ 0x70, 0x71, 0x72, 0x73, 0x76, /* LED3 */ 0x80, 0x81, 0x82, 0x83, 0x86, }; static u8 flash_debug_regs[] = { 0x40, 0x41, 0x42, 0x43, 0x44, 0x48, 0x49, 0x4b, 0x4c, 0x4f, 0x46, 0x47, }; static u8 rgb_pwm_debug_regs[] = { 0x45, 0x46, 0x47, }; static u8 mpp_debug_regs[] = { 0x40, 0x41, 0x42, 0x45, 0x46, 0x4c, }; static u8 kpdbl_debug_regs[] = { 0x40, 0x46, 0xb1, 0xb3, 0xb4, 0xe5, }; /** * pwm_config_data - pwm configuration data * @lut_params - lut parameters to be used by pwm driver * @pwm_device - pwm device * @pwm_channel - pwm channel to be configured for led * @pwm_period_us - period for pwm, in us * @mode - mode the led operates in * @old_duty_pcts - storage for duty pcts that may need to be reused * @default_mode - default mode of LED as set in device tree * @use_blink - use blink sysfs entry * @blinking - device is currently blinking w/LPG mode */ struct pwm_config_data { struct lut_params lut_params; struct pwm_device *pwm_dev; int pwm_channel; #ifdef CONFIG_LEDS_PM8941_EMOTIONAL int leds_on; #endif u32 pwm_period_us; struct pwm_duty_cycles *duty_cycles; int *old_duty_pcts; u8 mode; u8 default_mode; bool use_blink; bool blinking; }; /** * wled_config_data - wled configuration data * @num_strings - number of wled strings supported * @ovp_val - 
over voltage protection threshold
 * @boost_curr_lim - boost current limit
 * @cp_select - high pole capacitance
 * @ctrl_delay_us - delay in activation of led
 * @dig_mod_gen_en - digital module generator
 * @cs_out_en - current sink output enable
 * @op_fdbck - selection of output as feedback for the boost
 */
struct wled_config_data {
	u8	num_strings;
	u8	ovp_val;
	u8	boost_curr_lim;
	u8	cp_select;
	u8	ctrl_delay_us;
	u8	switch_freq;	/* NOTE(review): not in kerneldoc above; presumably boost switching frequency — confirm against DT binding */
	u8	op_fdbck;
	u8	pmic_version;	/* PMIC revision selector (see PMIC_VER_8026 usage below) */
	bool	dig_mod_gen_en;
	bool	cs_out_en;
};

/**
 * mpp_config_data - mpp configuration data
 * @pwm_cfg - device pwm configuration
 * @current_setting - current setting, 5ma-40ma in 5ma increments
 * @source_sel - source selection
 * @mode_ctrl - mode control
 * @vin_ctrl - input control
 * @min_brightness - minimum brightness supported
 * @pwm_mode - pwm mode in use
 */
struct mpp_config_data {
	struct pwm_config_data	*pwm_cfg;
	u8	current_setting;
	u8	source_sel;
	u8	mode_ctrl;
	u8	vin_ctrl;
	u8	min_brightness;
	u8	pwm_mode;
};

/**
 * flash_config_data - flash configuration data
 * @current_prgm - current to be programmed, scaled by max level
 * @current_prgm2 - current for the secondary flash LED, scaled by max
 *                  level (CONFIG_LGE_DUAL_LED only)
 * @clamp_curr - clamp current to use
 * @headroom - headroom value to use
 * @duration - duration of the flash
 * @enable_module - enable address for particular flash
 * @trigger_flash - trigger flash
 * @startup_dly - startup delay for flash
 * @strobe_type - select between sw and hw strobe
 * @peripheral_subtype - module peripheral subtype
 * @current_addr - address to write for current
 * @second_addr - address of secondary flash to be written
 * @safety_timer - enable safety timer or watchdog timer
 * @torch_enable - enable flash LED torch mode
 * @flash_reg_get - flash regulator attached or not
 * @flash_wa_reg_get - workaround regulator attached or not
 * @flash_on - flash status, on or off
 * @torch_on - torch status, on or off
 * @flash_boost_reg - boost regulator for flash
 * @torch_boost_reg - boost regulator for torch
 * @flash_wa_reg - flash regulator for wa
 */
struct flash_config_data {
	u8	current_prgm;
#ifdef CONFIG_LGE_DUAL_LED
	u8	current_prgm2;
#endif
	u8	clamp_curr;
	u8	headroom;
	u8	duration;
	u8	enable_module;
	u8	trigger_flash;
	u8	startup_dly;
	u8	strobe_type;
	u8	peripheral_subtype;
	u16	current_addr;
	u16	second_addr;
	bool	safety_timer;
	bool	torch_enable;
	bool	flash_reg_get;
	bool	flash_wa_reg_get;
	bool	flash_on;
	bool	torch_on;
	struct regulator *flash_boost_reg;
	struct regulator *torch_boost_reg;
	struct regulator *flash_wa_reg;
};

/**
 * kpdbl_config_data - kpdbl configuration data
 * @pwm_cfg - device pwm configuration
 * @mode - running mode: pwm or lut
 * @row_id - row id of the led
 * @row_src_vbst - 0 for vph_pwr and 1 for vbst
 * @row_src_en - enable row source
 * @always_on - always on row
 * @lut_params - lut parameters to be used by pwm driver
 * @duty_cycles - duty cycles for lut
 * @pwm_mode - pwm mode in use
 */
struct kpdbl_config_data {
	struct pwm_config_data	*pwm_cfg;
	u32	row_id;
	bool	row_src_vbst;
	bool	row_src_en;
	bool	always_on;
	struct pwm_duty_cycles	*duty_cycles;
	struct lut_params	lut_params;
	u8	pwm_mode;
};

/**
 * rgb_config_data - rgb configuration data
 * @pwm_cfg - device pwm configuration
 * @enable - bits to enable led
 */
struct rgb_config_data {
	struct pwm_config_data	*pwm_cfg;
	u8	enable;
};

/**
 * struct qpnp_led_data - internal led data structure
 * @led_classdev - led class device
 * @delayed_work - delayed work for turning off the LED
 * @workqueue - dedicated workqueue to handle concurrency
 * @work - workqueue for led
 * @id - led index
 * @base_reg - base register given in device tree
 * @lock - to protect the transactions
 * @reg - cached value of led register
 * @num_leds - number of leds in the module
 * @max_current - maximum current supported by LED
 * @default_on - true: default state max, false, default state 0
 * @turn_off_delay_ms - number of msec before turning off the LED
 */
struct qpnp_led_data {
	struct led_classdev	cdev;
	struct spmi_device	*spmi_dev;	/* SPMI bus handle; not in kerneldoc above */
	struct delayed_work	dwork;
	struct workqueue_struct *workqueue;
	struct work_struct	work;
	int			id;
u16 base; u8 reg; u8 num_leds; struct mutex lock; struct wled_config_data *wled_cfg; struct flash_config_data *flash_cfg; struct kpdbl_config_data *kpdbl_cfg; struct rgb_config_data *rgb_cfg; struct mpp_config_data *mpp_cfg; int max_current; bool default_on; bool in_order_command_processing; int turn_off_delay_ms; }; static DEFINE_MUTEX(flash_lock); static struct pwm_device *kpdbl_master; static u32 kpdbl_master_period_us; DECLARE_BITMAP(kpdbl_leds_in_use, NUM_KPDBL_LEDS); static bool is_kpdbl_master_turn_on; #ifdef CONFIG_LEDS_PM8941_EMOTIONAL struct qpnp_led_data *red_led; struct qpnp_led_data *green_led; struct qpnp_led_data *blue_led; struct qpnp_led_data *kpdbl_lpg1; struct qpnp_led_data *kpdbl_lpg2; static int kpdbl_brightness_flag; extern void change_led_pattern(int pattern); void rgb_luts_set(struct qpnp_led_data *led); #endif #ifdef CONFIG_MACH_LGE static struct mutex led_sequence_lock; #endif static int qpnp_led_masked_write(struct qpnp_led_data *led, u16 addr, u8 mask, u8 val) { int rc; u8 reg; rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, addr, &reg, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=%x, rc(%d)\n", addr, rc); } reg &= ~mask; reg |= val; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, addr, &reg, 1); if (rc) dev_err(&led->spmi_dev->dev, "Unable to write to addr=%x, rc(%d)\n", addr, rc); return rc; } static void qpnp_dump_regs(struct qpnp_led_data *led, u8 regs[], u8 array_size) { int i; u8 val; pr_debug("===== %s LED register dump start =====\n", led->cdev.name); for (i = 0; i < array_size; i++) { spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, led->base + regs[i], &val, sizeof(val)); pr_debug("%s: 0x%x = 0x%x\n", led->cdev.name, led->base + regs[i], val); } pr_debug("===== %s LED register dump end =====\n", led->cdev.name); } static int qpnp_wled_sync(struct qpnp_led_data *led) { int rc; u8 val; /* sync */ val = WLED_SYNC_VAL; rc = 
spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_SYNC_REG(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED set sync reg failed(%d)\n", rc); return rc; } val = WLED_SYNC_RESET_VAL; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_SYNC_REG(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED reset sync reg failed(%d)\n", rc); return rc; } return 0; } static int qpnp_wled_set(struct qpnp_led_data *led) { int rc, duty, level; u8 val, i, num_wled_strings, sink_val; num_wled_strings = led->wled_cfg->num_strings; level = led->cdev.brightness; if (level > WLED_MAX_LEVEL) level = WLED_MAX_LEVEL; if (level == 0) { for (i = 0; i < num_wled_strings; i++) { rc = qpnp_led_masked_write(led, WLED_FULL_SCALE_REG(led->base, i), WLED_MAX_CURR_MASK, WLED_NO_CURRENT); if (rc) { dev_err(&led->spmi_dev->dev, "Write max current failure (%d)\n", rc); return rc; } } rc = qpnp_wled_sync(led); if (rc) { dev_err(&led->spmi_dev->dev, "WLED sync failed(%d)\n", rc); return rc; } rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_CURR_SINK_REG(led->base), &sink_val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED read sink reg failed(%d)\n", rc); return rc; } if (led->wled_cfg->pmic_version == PMIC_VER_8026) { val = WLED_DISABLE_ALL_SINKS; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_CURR_SINK_REG(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED write sink reg failed(%d)\n", rc); return rc; } usleep(WLED_OVP_DELAY); } val = WLED_BOOST_OFF; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_MOD_CTRL_REG(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED write ctrl reg failed(%d)\n", rc); return rc; } for (i = 0; i < num_wled_strings; i++) { rc = qpnp_led_masked_write(led, WLED_FULL_SCALE_REG(led->base, i), WLED_MAX_CURR_MASK, (u8)led->max_current); if (rc) { dev_err(&led->spmi_dev->dev, "Write max current 
failure (%d)\n", rc); return rc; } } rc = qpnp_wled_sync(led); if (rc) { dev_err(&led->spmi_dev->dev, "WLED sync failed(%d)\n", rc); return rc; } rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_CURR_SINK_REG(led->base), &sink_val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED write sink reg failed(%d)\n", rc); return rc; } } else { val = WLED_BOOST_ON; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_MOD_CTRL_REG(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED write ctrl reg failed(%d)\n", rc); return rc; } } duty = (WLED_MAX_DUTY_CYCLE * level) / WLED_MAX_LEVEL; /* program brightness control registers */ for (i = 0; i < num_wled_strings; i++) { rc = qpnp_led_masked_write(led, WLED_BRIGHTNESS_CNTL_MSB(led->base, i), WLED_MSB_MASK, (duty >> WLED_8_BIT_SHFT) & WLED_4_BIT_MASK); if (rc) { dev_err(&led->spmi_dev->dev, "WLED set brightness MSB failed(%d)\n", rc); return rc; } val = duty & WLED_8_BIT_MASK; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, WLED_BRIGHTNESS_CNTL_LSB(led->base, i), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "WLED set brightness LSB failed(%d)\n", rc); return rc; } } rc = qpnp_wled_sync(led); if (rc) { dev_err(&led->spmi_dev->dev, "WLED sync failed(%d)\n", rc); return rc; } return 0; } static int qpnp_mpp_set(struct qpnp_led_data *led) { int rc; u8 val; int duty_us, duty_ns, period_us; if (led->cdev.brightness) { if (led->cdev.brightness < led->mpp_cfg->min_brightness) { dev_warn(&led->spmi_dev->dev, "brightness is less than supported..." 
\ "set to minimum supported\n"); led->cdev.brightness = led->mpp_cfg->min_brightness; } if (led->mpp_cfg->pwm_mode != MANUAL_MODE) { if (!led->mpp_cfg->pwm_cfg->blinking) { led->mpp_cfg->pwm_cfg->mode = led->mpp_cfg->pwm_cfg->default_mode; led->mpp_cfg->pwm_mode = led->mpp_cfg->pwm_cfg->default_mode; } } if (led->mpp_cfg->pwm_mode == PWM_MODE) { /*config pwm for brightness scaling*/ period_us = led->mpp_cfg->pwm_cfg->pwm_period_us; if (period_us > INT_MAX / NSEC_PER_USEC) { duty_us = (period_us * led->cdev.brightness) / LED_FULL; rc = pwm_config_us( led->mpp_cfg->pwm_cfg->pwm_dev, duty_us, period_us); } else { duty_ns = ((period_us * NSEC_PER_USEC) / LED_FULL) * led->cdev.brightness; rc = pwm_config( led->mpp_cfg->pwm_cfg->pwm_dev, duty_ns, period_us * NSEC_PER_USEC); } if (rc < 0) { dev_err(&led->spmi_dev->dev, "Failed to " \ "configure pwm for new values\n"); return rc; } } if (led->mpp_cfg->pwm_mode != MANUAL_MODE) pwm_enable(led->mpp_cfg->pwm_cfg->pwm_dev); else { if (led->cdev.brightness < LED_MPP_CURRENT_MIN) led->cdev.brightness = LED_MPP_CURRENT_MIN; val = (led->cdev.brightness / LED_MPP_CURRENT_MIN) - 1; rc = qpnp_led_masked_write(led, LED_MPP_SINK_CTRL(led->base), LED_MPP_SINK_MASK, val); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write sink control reg\n"); return rc; } } val = (led->mpp_cfg->source_sel & LED_MPP_SRC_MASK) | (led->mpp_cfg->mode_ctrl & LED_MPP_MODE_CTRL_MASK); rc = qpnp_led_masked_write(led, LED_MPP_MODE_CTRL(led->base), LED_MPP_MODE_MASK, val); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led mode reg\n"); return rc; } rc = qpnp_led_masked_write(led, LED_MPP_EN_CTRL(led->base), LED_MPP_EN_MASK, LED_MPP_EN_ENABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led enable " \ "reg\n"); return rc; } } else { if (led->mpp_cfg->pwm_mode != MANUAL_MODE) { led->mpp_cfg->pwm_cfg->mode = led->mpp_cfg->pwm_cfg->default_mode; led->mpp_cfg->pwm_mode = led->mpp_cfg->pwm_cfg->default_mode; 
pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev); } rc = qpnp_led_masked_write(led, LED_MPP_MODE_CTRL(led->base), LED_MPP_MODE_MASK, LED_MPP_MODE_DISABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led mode reg\n"); return rc; } rc = qpnp_led_masked_write(led, LED_MPP_EN_CTRL(led->base), LED_MPP_EN_MASK, LED_MPP_EN_DISABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led enable reg\n"); return rc; } } if (led->mpp_cfg->pwm_mode != MANUAL_MODE) led->mpp_cfg->pwm_cfg->blinking = false; qpnp_dump_regs(led, mpp_debug_regs, ARRAY_SIZE(mpp_debug_regs)); return 0; } static int qpnp_flash_regulator_operate(struct qpnp_led_data *led, bool on) { u8 buf = 0; int rc = 0; static bool is_phy_vbus_write; if (!led) return -EINVAL; if (!on || !led->cdev.brightness) goto regulator_turn_off; /* SMBB_USB_SUSP: USB Suspend */ buf = 0x01; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1347, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_SUSP reg write failed(%d)\n", rc); return rc; } rc = spmi_ext_register_readl(led->spmi_dev->ctrl, 0, 0x13EA, &buf, 1); if (rc) pr_err("SPMI read failed base:0x13EA rc=%d\n", rc); if (buf != 0x2F) { is_phy_vbus_write = true; /* SMBB_USB_SEC_ACCESS */ buf = 0xA5; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x13D0, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_SEC_ACCESS reg write failed(%d)\n", rc); return rc; } /* SMBB_USB_COMP_OVR1: overrides USBIN_ULIMIT_OK and USBIN_LLIMIT_OK to 1 and CHG_GONE comparator to 0. 
*/ buf = 0x2F; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x13EA, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_COMP_OVR1 reg write failed(%d)\n", rc); return rc; } } return rc; regulator_turn_off: if (is_phy_vbus_write) { is_phy_vbus_write = false; /* SMBB_USB_SEC_ACCESS */ buf = 0xA5; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x13D0, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_SEC_ACCESS reg write failed(%d)\n", rc); return rc; } /* SMBB_USB_COMP_OVR1: overrides USBIN_ULIMIT_OK and USBIN_LLIMIT_OK to 1 and CHG_GONE comparator to 0. */ buf = 0x00; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x13EA, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_COMP_OVR1 reg write failed(%d)\n", rc); return rc; } } /* SMBB_USB_SUSP: USB Suspend */ buf = 0x00; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1347, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "SMBB_USB_SUSP reg write failed(%d)\n", rc); return rc; } buf = 0x00; rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 1, 0xD346, &buf, 1); if (rc) { dev_err(&led->spmi_dev->dev, "FLASH_ENABLE reg write failed(%d)\n", rc); return rc; } return rc; } static int qpnp_torch_regulator_operate(struct qpnp_led_data *led, bool on) { int rc; if (!on) goto regulator_turn_off; if (!led->flash_cfg->torch_on) { rc = regulator_enable(led->flash_cfg->torch_boost_reg); if (rc) { dev_err(&led->spmi_dev->dev, "Regulator enable failed(%d)\n", rc); return rc; } led->flash_cfg->torch_on = true; } return 0; regulator_turn_off: if (led->flash_cfg->torch_on) { rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); } rc = regulator_disable(led->flash_cfg->torch_boost_reg); if (rc) { dev_err(&led->spmi_dev->dev, "Regulator disable failed(%d)\n", rc); return rc; } led->flash_cfg->torch_on = false; } return 0; } static int 
qpnp_flash_set(struct qpnp_led_data *led) { int rc, error; int val = led->cdev.brightness; #ifdef CONFIG_MACH_LGE u8 charger_temp_config, uCTempThrSet; uCTempThrSet = 0xFD; rc = spmi_ext_register_readl(led->spmi_dev->ctrl, 0, 0x1066, &charger_temp_config, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=0x1066, rc(%d)\n", rc); } #endif if (led->flash_cfg->torch_enable) led->flash_cfg->current_prgm = (val * TORCH_MAX_LEVEL / led->max_current); else led->flash_cfg->current_prgm = (val * FLASH_MAX_LEVEL / led->max_current); if(val == 1) led->flash_cfg->current_prgm = 0; /* Set led current */ if (val > 0) { if (led->flash_cfg->torch_enable) { if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_DUAL) { rc = qpnp_torch_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return rc; } } else if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) { rc = qpnp_flash_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); goto error_flash_set; } } rc = qpnp_led_masked_write(led, FLASH_LED_UNLOCK_SECURE(led->base), FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); if (rc) { dev_err(&led->spmi_dev->dev, "Secure reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, FLASH_LED_TORCH(led->base), FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Torch reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, led->flash_cfg->current_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm); if (rc) { dev_err(&led->spmi_dev->dev, "Current reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, led->flash_cfg->second_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm); if (rc) { dev_err(&led->spmi_dev->dev, "2nd Current reg write failed(%d)\n", rc); goto error_reg_write; } qpnp_led_masked_write(led, 
FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK, TORCH_MAX_LEVEL); if (rc) { dev_err(&led->spmi_dev->dev, "Max current reg write failed(%d)\n", rc); goto error_reg_write; } #ifdef CONFIG_MACH_LGE /* Increase charger temp threshold for flash */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &uCTempThrSet, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write increased temp config failure (%d)\n", rc); return rc; } #endif rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), FLASH_ENABLE_MASK, led->flash_cfg->enable_module); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); goto error_reg_write; } if (!led->flash_cfg->strobe_type) led->flash_cfg->trigger_flash &= ~FLASH_HW_SW_STROBE_SEL_MASK; else led->flash_cfg->trigger_flash |= FLASH_HW_SW_STROBE_SEL_MASK; rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, led->flash_cfg->trigger_flash); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d strobe reg write failed(%d)\n", led->id, rc); goto error_reg_write; } } else { rc = qpnp_flash_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); goto error_flash_set; } /* Set flash safety timer */ rc = qpnp_led_masked_write(led, FLASH_SAFETY_TIMER(led->base), FLASH_SAFETY_TIMER_MASK, led->flash_cfg->duration); if (rc) { dev_err(&led->spmi_dev->dev, "Safety timer reg write failed(%d)\n", rc); goto error_flash_set; } /* Set max current */ rc = qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK, FLASH_MAX_LEVEL); if (rc) { dev_err(&led->spmi_dev->dev, "Max current reg write failed(%d)\n", rc); goto error_flash_set; } /* Set clamp current */ rc = qpnp_led_masked_write(led, FLASH_CLAMP_CURR(led->base), FLASH_CURRENT_MASK, led->flash_cfg->clamp_curr); if (rc) { dev_err(&led->spmi_dev->dev, "Clamp current reg write failed(%d)\n", rc); goto error_flash_set; } rc = qpnp_led_masked_write(led, 
led->flash_cfg->current_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm); if (rc) { dev_err(&led->spmi_dev->dev, "Current reg write failed(%d)\n", rc); goto error_flash_set; } #ifdef CONFIG_MACH_LGE /* Increase charger temp threshold for flash */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &uCTempThrSet, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write increased temp config failure (%d)\n", rc); return rc; } #endif rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), led->flash_cfg->enable_module, led->flash_cfg->enable_module); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); goto error_flash_set; } /* * Add 1ms delay for bharger enter stable state */ usleep(FLASH_RAMP_UP_DELAY_US); if (!led->flash_cfg->strobe_type) { rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, led->flash_cfg->trigger_flash); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d strobe reg write failed(%d)\n", led->id, rc); goto error_flash_set; } } else { rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), (led->flash_cfg->trigger_flash | FLASH_HW_SW_STROBE_SEL_MASK), (led->flash_cfg->trigger_flash | FLASH_HW_SW_STROBE_SEL_MASK)); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d strobe reg write failed(%d)\n", led->id, rc); goto error_flash_set; } } } } else { rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d flash write failed(%d)\n", led->id, rc); if (led->flash_cfg->torch_enable) goto error_torch_set; else goto error_flash_set; } if (led->flash_cfg->torch_enable) { rc = qpnp_led_masked_write(led, FLASH_LED_UNLOCK_SECURE(led->base), FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); if (rc) { dev_err(&led->spmi_dev->dev, "Secure reg write failed(%d)\n", rc); goto error_torch_set; } rc = qpnp_led_masked_write(led, FLASH_LED_TORCH(led->base), FLASH_TORCH_MASK, 
FLASH_LED_TORCH_DISABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Torch reg write failed(%d)\n", rc); goto error_torch_set; } if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_DUAL) { rc = qpnp_torch_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return rc; } } else if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) { rc = qpnp_flash_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return rc; } } } else { /* * Disable module after ramp down complete for stable * behavior */ usleep(FLASH_RAMP_DN_DELAY_US); rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), led->flash_cfg->enable_module & ~FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); if (led->flash_cfg->torch_enable) goto error_torch_set; else goto error_flash_set; } rc = qpnp_flash_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return rc; } } #ifdef CONFIG_MACH_LGE /* Set charger temp config back to original settings */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &charger_temp_config, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write original charger temp config failure (%d)\n", rc); return rc; } #endif } qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs)); return 0; error_reg_write: if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) goto error_flash_set; error_torch_set: error = qpnp_torch_regulator_operate(led, false); if (error) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return error; } return rc; error_flash_set: error = qpnp_flash_regulator_operate(led, false); if (error) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return error; } return rc; } #ifdef CONFIG_LGE_DUAL_LED static int 
qpnp_flash_set2(struct qpnp_led_data *led) { int rc, error; int val = led->cdev.brightness; int val2 = led->cdev.brightness2; #ifdef CONFIG_MACH_LGE u8 charger_temp_config, uCTempThrSet; uCTempThrSet = 0xFD; rc = spmi_ext_register_readl(led->spmi_dev->ctrl, 0, 0x1066, &charger_temp_config, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=0x1066, rc(%d)\n", rc); } #endif pr_info("%s: %d: name = %s, val = %d, %d\n", __func__, __LINE__, led->cdev.name, val, val2); led->flash_cfg->current_prgm = (val * TORCH_MAX_LEVEL / led->max_current); led->flash_cfg->current_prgm2 = (val2 * TORCH_MAX_LEVEL / led->max_current); if(val == 1) led->flash_cfg->current_prgm = 0; if(val2 == 1) led->flash_cfg->current_prgm2 = 0; /* Set led current */ if (val > 0) { if (led->flash_cfg->torch_enable) { if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_DUAL) { rc = qpnp_torch_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return rc; } } else if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) { rc = qpnp_flash_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); goto error_flash_set; } } rc = qpnp_led_masked_write(led, FLASH_LED_UNLOCK_SECURE(led->base), FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); if (rc) { dev_err(&led->spmi_dev->dev, "Secure reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, FLASH_LED_TORCH(led->base), FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Torch reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, led->flash_cfg->current_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm); if (rc) { dev_err(&led->spmi_dev->dev, "Current reg write failed(%d)\n", rc); goto error_reg_write; } rc = qpnp_led_masked_write(led, led->flash_cfg->second_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm2); if (rc) { 
dev_err(&led->spmi_dev->dev, "2nd Current reg write failed(%d)\n", rc); goto error_reg_write; } qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK, TORCH_MAX_LEVEL); if (rc) { dev_err(&led->spmi_dev->dev, "Max current reg write failed(%d)\n", rc); goto error_reg_write; } #ifdef CONFIG_MACH_LGE /* Increase charger temp threshold for flash */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &uCTempThrSet, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write increased temp config failure (%d)\n", rc); return rc; } #endif rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), FLASH_ENABLE_MASK, led->flash_cfg->enable_module); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); goto error_reg_write; } if (!led->flash_cfg->strobe_type) led->flash_cfg->trigger_flash &= ~FLASH_HW_SW_STROBE_SEL_MASK; else led->flash_cfg->trigger_flash |= FLASH_HW_SW_STROBE_SEL_MASK; rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, led->flash_cfg->trigger_flash); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d strobe reg write failed(%d)\n", led->id, rc); goto error_reg_write; } } else { rc = qpnp_flash_regulator_operate(led, true); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); goto error_flash_set; } /* Set flash safety timer */ rc = qpnp_led_masked_write(led, FLASH_SAFETY_TIMER(led->base), FLASH_SAFETY_TIMER_MASK, led->flash_cfg->duration); if (rc) { dev_err(&led->spmi_dev->dev, "Safety timer reg write failed(%d)\n", rc); goto error_flash_set; } /* Set max current */ rc = qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK, FLASH_MAX_LEVEL); if (rc) { dev_err(&led->spmi_dev->dev, "Max current reg write failed(%d)\n", rc); goto error_flash_set; } /* Set clamp current */ rc = qpnp_led_masked_write(led, FLASH_CLAMP_CURR(led->base), FLASH_CURRENT_MASK, led->flash_cfg->clamp_curr); if (rc) { dev_err(&led->spmi_dev->dev, 
"Clamp current reg write failed(%d)\n", rc); goto error_flash_set; } rc = qpnp_led_masked_write(led, led->flash_cfg->current_addr, FLASH_CURRENT_MASK, led->flash_cfg->current_prgm); if (rc) { dev_err(&led->spmi_dev->dev, "Current reg write failed(%d)\n", rc); goto error_flash_set; } #ifdef CONFIG_MACH_LGE /* Increase charger temp threshold for flash */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &uCTempThrSet, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write increased temp config failure (%d)\n", rc); return rc; } #endif rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), led->flash_cfg->enable_module, led->flash_cfg->enable_module); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); goto error_flash_set; } /* * Add 1ms delay for bharger enter stable state */ usleep(FLASH_RAMP_UP_DELAY_US); if (!led->flash_cfg->strobe_type) led->flash_cfg->trigger_flash &= ~FLASH_HW_SW_STROBE_SEL_MASK; else led->flash_cfg->trigger_flash |= FLASH_HW_SW_STROBE_SEL_MASK; rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, led->flash_cfg->trigger_flash); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d strobe reg write failed(%d)\n", led->id, rc); goto error_flash_set; } } } else { rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), led->flash_cfg->trigger_flash, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "LED %d flash write failed(%d)\n", led->id, rc); if (led->flash_cfg->torch_enable) goto error_torch_set; else goto error_flash_set; } if (led->flash_cfg->torch_enable) { rc = qpnp_led_masked_write(led, FLASH_LED_UNLOCK_SECURE(led->base), FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE); if (rc) { dev_err(&led->spmi_dev->dev, "Secure reg write failed(%d)\n", rc); goto error_torch_set; } rc = qpnp_led_masked_write(led, FLASH_LED_TORCH(led->base), FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Torch reg write failed(%d)\n", rc); 
goto error_torch_set; } if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_DUAL) { rc = qpnp_torch_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return rc; } } else if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) { rc = qpnp_flash_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return rc; } } } else { /* * Disable module after ramp down complete for stable * behavior */ usleep(FLASH_RAMP_DN_DELAY_US); rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), led->flash_cfg->enable_module & ~FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); if (led->flash_cfg->torch_enable) goto error_torch_set; else goto error_flash_set; } rc = qpnp_flash_regulator_operate(led, false); if (rc) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return rc; } } #ifdef CONFIG_MACH_LGE /* Set charger temp config back to original settings */ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, 0, 0x1066, &charger_temp_config, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Write original charger temp config failure (%d)\n", rc); return rc; } #endif } qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs)); return 0; error_reg_write: if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE) goto error_flash_set; error_torch_set: error = qpnp_torch_regulator_operate(led, false); if (error) { dev_err(&led->spmi_dev->dev, "Torch regulator operate failed(%d)\n", rc); return error; } return rc; error_flash_set: error = qpnp_flash_regulator_operate(led, false); if (error) { dev_err(&led->spmi_dev->dev, "Flash regulator operate failed(%d)\n", rc); return error; } return rc; } #endif static int qpnp_kpdbl_set(struct qpnp_led_data *led) { int rc; int duty_us, duty_ns, period_us; if (led->cdev.brightness) { if 
(!led->kpdbl_cfg->pwm_cfg->blinking) led->kpdbl_cfg->pwm_cfg->mode = led->kpdbl_cfg->pwm_cfg->default_mode; if (bitmap_empty(kpdbl_leds_in_use, NUM_KPDBL_LEDS)) { rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base), KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); return rc; } } /* On some platforms, GPLED1 channel should always be enabled * for the other GPLEDs 2/3/4 to glow. Before enabling GPLED * 2/3/4, first check if GPLED1 is already enabled. If GPLED1 * channel is not enabled, then enable the GPLED1 channel but * with a 0 brightness */ if (!led->kpdbl_cfg->always_on && !test_bit(KPDBL_MASTER_BIT_INDEX, kpdbl_leds_in_use) && kpdbl_master) { rc = pwm_config_us(kpdbl_master, 0, kpdbl_master_period_us); if (rc < 0) { dev_err(&led->spmi_dev->dev, "pwm config failed\n"); return rc; } rc = pwm_enable(kpdbl_master); if (rc < 0) { dev_err(&led->spmi_dev->dev, "pwm enable failed\n"); return rc; } set_bit(KPDBL_MASTER_BIT_INDEX, kpdbl_leds_in_use); } #ifdef CONFIG_LEDS_PM8941_EMOTIONAL led->cdev.brightness = led->cdev.brightness*28/100; if (kpdbl_brightness_flag == 0) { printk(KERN_INFO "[kpdbl LED] %s: %d\n", led->cdev.name, led->cdev.brightness); kpdbl_brightness_flag++; } #endif if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) { period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us; if (period_us > INT_MAX / NSEC_PER_USEC) { duty_us = (period_us * led->cdev.brightness) / KPDBL_MAX_LEVEL; rc = pwm_config_us( led->kpdbl_cfg->pwm_cfg->pwm_dev, duty_us, period_us); } else { duty_ns = ((period_us * NSEC_PER_USEC) / KPDBL_MAX_LEVEL) * led->cdev.brightness; rc = pwm_config( led->kpdbl_cfg->pwm_cfg->pwm_dev, duty_ns, period_us * NSEC_PER_USEC); } if (rc < 0) { dev_err(&led->spmi_dev->dev, "pwm config failed\n"); return rc; } } rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev); if (rc < 0) { dev_err(&led->spmi_dev->dev, "pwm enable failed\n"); return rc; } /* workaround for KPDBL_LUT_RAMP_CONTROL, 
wonjong.shin@lge.com */ if (led->kpdbl_cfg->pwm_cfg->mode == LPG_MODE) { rc = qpnp_led_masked_write(led, 0xE3C8, 0xFF, 0x03); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write KPDBL_LUT_RAMP_CONTROL reg(%d)\n", rc); return rc; } } #ifdef CONFIG_LEDS_PM8941_EMOTIONAL if (led->kpdbl_cfg->pwm_cfg->leds_on == 0) { led->kpdbl_cfg->pwm_cfg->leds_on = 1; set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use); /* is_kpdbl_master_turn_on will be set to true when GPLED1 * channel is enabled and has a valid brightness value */ if (led->kpdbl_cfg->always_on) is_kpdbl_master_turn_on = true; } #else set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use); /* is_kpdbl_master_turn_on will be set to true when GPLED1 * channel is enabled and has a valid brightness value */ if (led->kpdbl_cfg->always_on) is_kpdbl_master_turn_on = true; #endif } else { led->kpdbl_cfg->pwm_cfg->mode = led->kpdbl_cfg->pwm_cfg->default_mode; /* Before disabling GPLED1, check if any other GPLED 2/3/4 is * on. If any of the other GPLED 2/3/4 is on, then have the * GPLED1 channel enabled with 0 brightness. 
	 */
	if (led->kpdbl_cfg->always_on) {
		/*
		 * Turning OFF the always-on (master) channel: if any other
		 * kpdbl row is still in use, keep the master PWM running but
		 * parked at 0% duty so the remaining rows stay clocked.
		 */
		if (bitmap_weight(kpdbl_leds_in_use, NUM_KPDBL_LEDS) > 1) {
			rc = pwm_config_us(
				led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
				led->kpdbl_cfg->pwm_cfg->pwm_period_us);
			if (rc < 0) {
				dev_err(&led->spmi_dev->dev,
					"pwm config failed\n");
				return rc;
			}
			rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
			if (rc < 0) {
				dev_err(&led->spmi_dev->dev,
					"pwm enable failed\n");
				return rc;
			}
		} else {
			/*
			 * Master was the last user: stop its PWM, drop its
			 * bit from the in-use bitmap and disable the whole
			 * kpdbl module.
			 */
			if (kpdbl_master) {
				pwm_disable(kpdbl_master);
				clear_bit(KPDBL_MASTER_BIT_INDEX,
					kpdbl_leds_in_use);
				rc = qpnp_led_masked_write(
					led, KPDBL_ENABLE(led->base),
					KPDBL_MODULE_EN_MASK,
					KPDBL_MODULE_DIS);
				if (rc) {
					dev_err(&led->spmi_dev->dev,
						"Failed to write led"
						" enable reg\n");
					return rc;
				}
			}
		}
		is_kpdbl_master_turn_on = false;
	} else {
		/* Turning OFF a normal row: stop its PWM and release its bit. */
		pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
#ifdef CONFIG_LEDS_PM8941_EMOTIONAL
		led->kpdbl_cfg->pwm_cfg->leds_on = 0;
#endif
		clear_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
		/*
		 * If only the master bit remains set and the master was not
		 * explicitly turned on, it was only kept alive for this row:
		 * shut the master and the module down as well.
		 */
		if (bitmap_weight(kpdbl_leds_in_use,
			NUM_KPDBL_LEDS) == 1 && kpdbl_master &&
			!is_kpdbl_master_turn_on) {
			pwm_disable(kpdbl_master);
			clear_bit(KPDBL_MASTER_BIT_INDEX,
				kpdbl_leds_in_use);
			rc = qpnp_led_masked_write(
				led, KPDBL_ENABLE(led->base),
				KPDBL_MODULE_EN_MASK,
				KPDBL_MODULE_DIS);
			if (rc) {
				dev_err(&led->spmi_dev->dev,
					"Failed to write led enable reg\n");
				return rc;
			}
			is_kpdbl_master_turn_on = false;
		}
#ifdef CONFIG_LEDS_PM8941_EMOTIONAL
		printk(KERN_INFO "[kpdbl LED] %s: %d\n",
			led->cdev.name, led->cdev.brightness);
		kpdbl_brightness_flag = 0;
#endif
	}
	}

	led->kpdbl_cfg->pwm_cfg->blinking = false;
	qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));

	return 0;
}

/*
 * Apply the cached cdev.brightness to an RGB (red/green/blue) LED channel.
 *
 * Non-zero brightness: scale the PWM duty from the configured period,
 * enable the channel in RGB_LED_EN_CTL and start the PWM.  Zero
 * brightness: restore the default PWM mode, stop the PWM and clear the
 * channel's enable bit.  Always clears the one-shot `blinking` flag on
 * exit.  Returns 0 on success or a negative error code.
 */
static int qpnp_rgb_set(struct qpnp_led_data *led)
{
	int rc;
	int duty_us, duty_ns, period_us;

	if (led->cdev.brightness) {
		/* A pending blink request overrides the default mode once. */
		if (!led->rgb_cfg->pwm_cfg->blinking)
			led->rgb_cfg->pwm_cfg->mode =
				led->rgb_cfg->pwm_cfg->default_mode;
		if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
			period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
			/*
			 * If the period would overflow an int when expressed
			 * in nanoseconds, configure in microseconds instead.
			 */
			if (period_us > INT_MAX / NSEC_PER_USEC) {
				duty_us = (period_us * led->cdev.brightness) /
					LED_FULL;
				rc = pwm_config_us(
					led->rgb_cfg->pwm_cfg->pwm_dev,
					duty_us,
					period_us);
			} else {
				/* ns path: divide before multiply to avoid overflow */
				duty_ns = ((period_us * NSEC_PER_USEC) /
					LED_FULL) * led->cdev.brightness;
				rc = pwm_config(
					led->rgb_cfg->pwm_cfg->pwm_dev,
					duty_ns,
					period_us * NSEC_PER_USEC);
			}
			if (rc < 0) {
				dev_err(&led->spmi_dev->dev,
					"pwm config failed\n");
				return rc;
			}
		}
		/* Set this channel's enable bit before starting the PWM. */
		rc = qpnp_led_masked_write(led,
			RGB_LED_EN_CTL(led->base),
			led->rgb_cfg->enable, led->rgb_cfg->enable);
		if (rc) {
			dev_err(&led->spmi_dev->dev,
				"Failed to write led enable reg\n");
			return rc;
		}
		rc = pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
		if (rc < 0) {
			dev_err(&led->spmi_dev->dev, "pwm enable failed\n");
			return rc;
		}
	} else {
		led->rgb_cfg->pwm_cfg->mode =
			led->rgb_cfg->pwm_cfg->default_mode;
		pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
		rc = qpnp_led_masked_write(led,
			RGB_LED_EN_CTL(led->base),
			led->rgb_cfg->enable, RGB_LED_DISABLE);
		if (rc) {
			dev_err(&led->spmi_dev->dev,
				"Failed to write led enable reg\n");
			return rc;
		}
	}

	led->rgb_cfg->pwm_cfg->blinking = false;
	qpnp_dump_regs(led, rgb_pwm_debug_regs, ARRAY_SIZE(rgb_pwm_debug_regs));

	return 0;
}

#ifdef CONFIG_LGE_DUAL_LED
/*
 * Dual-LED brightness setter: caches two brightness values (cdev.brightness
 * and the LGE-specific cdev.brightness2), clamped to max_brightness, and
 * defers the hardware update to the work queue.
 */
static void qpnp_led_set2(struct led_classdev *led_cdev,
	enum led_brightness value, enum led_brightness value2)
{
	struct qpnp_led_data *led;

	led = container_of(led_cdev, struct qpnp_led_data, cdev);
	if (value < LED_OFF) {
		dev_err(&led->spmi_dev->dev, "Invalid brightness value\n");
		return;
	}

	if (value > led->cdev.max_brightness)
		value = led->cdev.max_brightness;
	if (value2 > led->cdev.max_brightness)
		value2 = led->cdev.max_brightness;

	led->cdev.brightness = value;
	led->cdev.brightness2 = value2;
	schedule_work(&led->work);
}
#endif

/*
 * led_classdev brightness_set hook: clamp and cache the requested value,
 * then defer the hardware update to process context via the work item.
 */
static void qpnp_led_set(struct led_classdev *led_cdev,
	enum led_brightness value)
{
	struct qpnp_led_data *led;

	led = container_of(led_cdev, struct qpnp_led_data, cdev);
	if (value < LED_OFF) {
		dev_err(&led->spmi_dev->dev, "Invalid brightness value\n");
		return;
	}

	if (value > led->cdev.max_brightness)
		value = led->cdev.max_brightness;

	/* led_sequence_lock serializes the brightness cache on LGE builds. */
#ifdef CONFIG_MACH_LGE
	mutex_lock(&led_sequence_lock);
#endif
	led->cdev.brightness = value;
#ifdef CONFIG_MACH_LGE
	mutex_unlock(&led_sequence_lock);
#endif
	/* Ordered LEDs use a dedicated single-threaded workqueue. */
	if (led->in_order_command_processing)
		queue_work(led->workqueue, &led->work);
	else
		schedule_work(&led->work);
}

/*
 * Push the cached brightness to the hardware, dispatching on LED type.
 * Flash LEDs share the global flash_lock (both flash channels live in one
 * peripheral); every other LED uses its own per-led lock.  Errors from the
 * per-type setters are logged but not propagated (void work context).
 */
static void __qpnp_led_work(struct qpnp_led_data *led,
	enum led_brightness value)
{
	int rc;

	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
		mutex_lock(&flash_lock);
	else
		mutex_lock(&led->lock);

	switch (led->id) {
	case QPNP_ID_WLED:
		rc = qpnp_wled_set(led);
		if (rc < 0)
			dev_err(&led->spmi_dev->dev,
				"WLED set brightness failed (%d)\n", rc);
		break;
	case QPNP_ID_FLASH1_LED0:
	case QPNP_ID_FLASH1_LED1:
#ifdef CONFIG_LGE_DUAL_LED
		/* Torch requests take the dual-LED path on LGE builds. */
		if (led->flash_cfg->torch_enable)
			rc = qpnp_flash_set2(led);
		else
#endif
			rc = qpnp_flash_set(led);
		if (rc < 0)
			dev_err(&led->spmi_dev->dev,
				"FLASH set brightness failed (%d)\n", rc);
		break;
	case QPNP_ID_RGB_RED:
	case QPNP_ID_RGB_GREEN:
	case QPNP_ID_RGB_BLUE:
#ifdef CONFIG_LEDS_PM8941_EMOTIONAL
		rgb_luts_set(led);
#else
		rc = qpnp_rgb_set(led);
		if (rc < 0)
			dev_err(&led->spmi_dev->dev,
				"RGB set brightness failed (%d)\n", rc);
#endif
		break;
	case QPNP_ID_LED_MPP:
		rc = qpnp_mpp_set(led);
		if (rc < 0)
			dev_err(&led->spmi_dev->dev,
				"MPP set brightness failed (%d)\n", rc);
		break;
	case QPNP_ID_KPDBL:
		rc = qpnp_kpdbl_set(led);
		if (rc < 0)
			dev_err(&led->spmi_dev->dev,
				"KPDBL set brightness failed (%d)\n", rc);
		break;
	default:
		dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
		break;
	}
	if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
		mutex_unlock(&flash_lock);
	else
		mutex_unlock(&led->lock);
}

/*
 * Work-item handler.  On LGE builds, non-flash LEDs are serialized under
 * led_sequence_lock; flash LEDs skip it (they already serialize on
 * flash_lock inside __qpnp_led_work, and must not block behind slow
 * sequence updates).
 */
static void qpnp_led_work(struct work_struct *work)
{
	struct qpnp_led_data *led = container_of(work,
		struct qpnp_led_data, work);

#ifdef CONFIG_MACH_LGE
	switch(led->id) {
	case QPNP_ID_FLASH1_LED0:
	case QPNP_ID_FLASH1_LED1:
		__qpnp_led_work(led, led->cdev.brightness);
		break;
	default:
		mutex_lock(&led_sequence_lock);
		__qpnp_led_work(led, led->cdev.brightness);
		mutex_unlock(&led_sequence_lock);
		break;
	}
#else
	__qpnp_led_work(led, led->cdev.brightness);
#endif
	return;
}

/*
 * Set cdev.max_brightness per LED type.  Current-limited types (flash,
 * MPP in manual mode) report max_current; level-based types report their
 * fixed *_MAX_LEVEL.  Returns -EINVAL for an unknown LED id.
 */
static int __devinit qpnp_led_set_max_brightness(struct qpnp_led_data *led)
{
	switch (led->id) {
	case QPNP_ID_WLED:
		led->cdev.max_brightness = WLED_MAX_LEVEL;
		break;
	case QPNP_ID_FLASH1_LED0:
	case QPNP_ID_FLASH1_LED1:
		led->cdev.max_brightness = led->max_current;
		break;
	case QPNP_ID_RGB_RED:
	case QPNP_ID_RGB_GREEN:
	case QPNP_ID_RGB_BLUE:
		led->cdev.max_brightness = RGB_MAX_LEVEL;
		break;
	case QPNP_ID_LED_MPP:
		if (led->mpp_cfg->pwm_mode == MANUAL_MODE)
			led->cdev.max_brightness = led->max_current;
		else
			led->cdev.max_brightness = MPP_MAX_LEVEL;
		break;
	case QPNP_ID_KPDBL:
		led->cdev.max_brightness = KPDBL_MAX_LEVEL;
		break;
	default:
		dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
		return -EINVAL;
	}

	return 0;
}

/* led_classdev brightness_get hook: report the cached brightness. */
static enum led_brightness qpnp_led_get(struct led_classdev *led_cdev)
{
	struct qpnp_led_data *led;

	led = container_of(led_cdev, struct qpnp_led_data, cdev);

	return led->cdev.brightness;
}

/* Delayed-work handler: force the LED off after turn_off_delay_ms. */
static void qpnp_led_turn_off_delayed(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct qpnp_led_data *led
		= container_of(dwork, struct qpnp_led_data, dwork);

	led->cdev.brightness = LED_OFF;
	qpnp_led_set(&led->cdev, led->cdev.brightness);
}

/* Schedule an automatic turn-off after the configured delay. */
static void qpnp_led_turn_off(struct qpnp_led_data *led)
{
	INIT_DELAYED_WORK(&led->dwork, qpnp_led_turn_off_delayed);
	schedule_delayed_work(&led->dwork,
		msecs_to_jiffies(led->turn_off_delay_ms));
}

/*
 * One-time WLED hardware init: validate DT-supplied limits, then program
 * OVP threshold, boost current limit, feedback, switching frequency,
 * current sinks, pole capacitance and the per-string modulators.
 */
static int __devinit qpnp_wled_init(struct qpnp_led_data *led)
{
	int rc, i;
	u8 num_wled_strings;

	num_wled_strings = led->wled_cfg->num_strings;

	/* verify ranges */
	if (led->wled_cfg->ovp_val > WLED_OVP_27V) {
		dev_err(&led->spmi_dev->dev, "Invalid ovp value\n");
		return -EINVAL;
	}

	if (led->wled_cfg->boost_curr_lim > WLED_CURR_LIMIT_1680mA) {
		dev_err(&led->spmi_dev->dev, "Invalid boost current limit\n");
		return -EINVAL;
	}

	if (led->wled_cfg->cp_select >
WLED_CP_SELECT_MAX) { dev_err(&led->spmi_dev->dev, "Invalid pole capacitance\n"); return -EINVAL; } if ((led->max_current > WLED_MAX_CURR)) { dev_err(&led->spmi_dev->dev, "Invalid max current\n"); return -EINVAL; } if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP) || (led->wled_cfg->ctrl_delay_us > WLED_CTL_DLY_MAX)) { dev_err(&led->spmi_dev->dev, "Invalid control delay\n"); return -EINVAL; } /* program over voltage protection threshold */ rc = qpnp_led_masked_write(led, WLED_OVP_CFG_REG(led->base), WLED_OVP_VAL_MASK, (led->wled_cfg->ovp_val << WLED_OVP_VAL_BIT_SHFT)); if (rc) { dev_err(&led->spmi_dev->dev, "WLED OVP reg write failed(%d)\n", rc); return rc; } /* program current boost limit */ rc = qpnp_led_masked_write(led, WLED_BOOST_LIMIT_REG(led->base), WLED_BOOST_LIMIT_MASK, led->wled_cfg->boost_curr_lim); if (rc) { dev_err(&led->spmi_dev->dev, "WLED boost limit reg write failed(%d)\n", rc); return rc; } /* program output feedback */ rc = qpnp_led_masked_write(led, WLED_FDBCK_CTRL_REG(led->base), WLED_OP_FDBCK_MASK, (led->wled_cfg->op_fdbck << WLED_OP_FDBCK_BIT_SHFT)); if (rc) { dev_err(&led->spmi_dev->dev, "WLED fdbck ctrl reg write failed(%d)\n", rc); return rc; } /* program switch frequency */ rc = qpnp_led_masked_write(led, WLED_SWITCHING_FREQ_REG(led->base), WLED_SWITCH_FREQ_MASK, led->wled_cfg->switch_freq); if (rc) { dev_err(&led->spmi_dev->dev, "WLED switch freq reg write failed(%d)\n", rc); return rc; } /* program current sink */ if (led->wled_cfg->cs_out_en) { rc = qpnp_led_masked_write(led, WLED_CURR_SINK_REG(led->base), WLED_CURR_SINK_MASK, (((1 << led->wled_cfg->num_strings) - 1) << WLED_CURR_SINK_SHFT)); if (rc) { dev_err(&led->spmi_dev->dev, "WLED curr sink reg write failed(%d)\n", rc); return rc; } } /* program high pole capacitance */ rc = qpnp_led_masked_write(led, WLED_HIGH_POLE_CAP_REG(led->base), WLED_CP_SELECT_MASK, led->wled_cfg->cp_select); if (rc) { dev_err(&led->spmi_dev->dev, "WLED pole cap reg write failed(%d)\n", rc); return 
rc; } /* program modulator, current mod src and cabc */ for (i = 0; i < num_wled_strings; i++) { rc = qpnp_led_masked_write(led, WLED_MOD_EN_REG(led->base, i), WLED_NO_MASK, WLED_EN_MASK); if (rc) { dev_err(&led->spmi_dev->dev, "WLED mod enable reg write failed(%d)\n", rc); return rc; } if (led->wled_cfg->dig_mod_gen_en) { rc = qpnp_led_masked_write(led, WLED_MOD_SRC_SEL_REG(led->base, i), WLED_NO_MASK, WLED_USE_EXT_GEN_MOD_SRC); if (rc) { dev_err(&led->spmi_dev->dev, "WLED dig mod en reg write failed(%d)\n", rc); } } rc = qpnp_led_masked_write(led, WLED_FULL_SCALE_REG(led->base, i), WLED_MAX_CURR_MASK, (u8)led->max_current); if (rc) { dev_err(&led->spmi_dev->dev, "WLED max current reg write failed(%d)\n", rc); return rc; } } /* Reset WLED enable register */ rc = qpnp_led_masked_write(led, WLED_MOD_CTRL_REG(led->base), WLED_8_BIT_MASK, WLED_BOOST_OFF); if (rc) { dev_err(&led->spmi_dev->dev, "WLED write ctrl reg failed(%d)\n", rc); return rc; } /* dump wled registers */ qpnp_dump_regs(led, wled_debug_regs, ARRAY_SIZE(wled_debug_regs)); return 0; } static ssize_t led_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; unsigned long state; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret = -EINVAL; ret = kstrtoul(buf, 10, &state); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); /* '1' to enable torch mode; '0' to switch to flash mode */ if (state == 1) led->flash_cfg->torch_enable = true; else led->flash_cfg->torch_enable = false; return count; } static ssize_t led_strobe_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; unsigned long state; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret = -EINVAL; ret = kstrtoul(buf, 10, &state); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); /* '0' for sw strobe; '1' for hw strobe */ if (state == 
1) led->flash_cfg->strobe_type = 1; else led->flash_cfg->strobe_type = 0; return count; } static int qpnp_pwm_init(struct pwm_config_data *pwm_cfg, struct spmi_device *spmi_dev, const char *name) { int rc, start_idx, idx_len, lut_max_size; if (pwm_cfg->pwm_channel != -1) { pwm_cfg->pwm_dev = pwm_request(pwm_cfg->pwm_channel, name); if (IS_ERR_OR_NULL(pwm_cfg->pwm_dev)) { dev_err(&spmi_dev->dev, "could not acquire PWM Channel %d, " \ "error %ld\n", pwm_cfg->pwm_channel, PTR_ERR(pwm_cfg->pwm_dev)); pwm_cfg->pwm_dev = NULL; return -ENODEV; } if (pwm_cfg->mode == LPG_MODE) { start_idx = pwm_cfg->duty_cycles->start_idx; idx_len = pwm_cfg->duty_cycles->num_duty_pcts; if (strnstr(name, "kpdbl", sizeof("kpdbl"))) lut_max_size = PWM_GPLED_LUT_MAX_SIZE; else lut_max_size = PWM_LUT_MAX_SIZE; if (idx_len >= lut_max_size && start_idx) { dev_err(&spmi_dev->dev, "Wrong LUT size or index\n"); return -EINVAL; } if ((start_idx + idx_len) > lut_max_size) { dev_err(&spmi_dev->dev, "Exceed LUT limit\n"); return -EINVAL; } rc = pwm_lut_config(pwm_cfg->pwm_dev, pwm_cfg->pwm_period_us, pwm_cfg->duty_cycles->duty_pcts, pwm_cfg->lut_params); if (rc < 0) { dev_err(&spmi_dev->dev, "Failed to " \ "configure pwm LUT\n"); return rc; } } } else { dev_err(&spmi_dev->dev, "Invalid PWM channel\n"); return -EINVAL; } return 0; } static ssize_t pwm_us_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 pwm_us; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_pwm_us; struct pwm_config_data *pwm_cfg; led = container_of(led_cdev, struct qpnp_led_data, cdev); ret = kstrtou32(buf, 10, &pwm_us); if (ret) return ret; switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid 
LED id type for pwm_us\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_pwm_us = pwm_cfg->pwm_period_us; pwm_cfg->pwm_period_us = pwm_us; pwm_free(pwm_cfg->pwm_dev); ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->pwm_period_us = previous_pwm_us; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new pwm_us value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t pause_lo_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 pause_lo; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_pause_lo; struct pwm_config_data *pwm_cfg; ret = kstrtou32(buf, 10, &pause_lo); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for pause lo\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_pause_lo = pwm_cfg->lut_params.lut_pause_lo; pwm_free(pwm_cfg->pwm_dev); pwm_cfg->lut_params.lut_pause_lo = pause_lo; ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->lut_params.lut_pause_lo = previous_pause_lo; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new pause lo value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t pause_hi_store(struct device 
*dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 pause_hi; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_pause_hi; struct pwm_config_data *pwm_cfg; ret = kstrtou32(buf, 10, &pause_hi); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for pause hi\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_pause_hi = pwm_cfg->lut_params.lut_pause_hi; pwm_free(pwm_cfg->pwm_dev); pwm_cfg->lut_params.lut_pause_hi = pause_hi; ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->lut_params.lut_pause_hi = previous_pause_hi; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new pause hi value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t start_idx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 start_idx; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_start_idx; struct pwm_config_data *pwm_cfg; ret = kstrtou32(buf, 10, &start_idx); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid 
LED id type for start idx\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_start_idx = pwm_cfg->duty_cycles->start_idx; pwm_cfg->duty_cycles->start_idx = start_idx; pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx; pwm_free(pwm_cfg->pwm_dev); ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->duty_cycles->start_idx = previous_start_idx; pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new start idx value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t ramp_step_ms_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 ramp_step_ms; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_ramp_step_ms; struct pwm_config_data *pwm_cfg; ret = kstrtou32(buf, 10, &ramp_step_ms); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for ramp step\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_ramp_step_ms = pwm_cfg->lut_params.ramp_step_ms; pwm_free(pwm_cfg->pwm_dev); pwm_cfg->lut_params.ramp_step_ms = ramp_step_ms; ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->lut_params.ramp_step_ms = previous_ramp_step_ms; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); 
dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new ramp step value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t lut_flags_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; u32 lut_flags; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret; u32 previous_lut_flags; struct pwm_config_data *pwm_cfg; ret = kstrtou32(buf, 10, &lut_flags); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for lut flags\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; previous_lut_flags = pwm_cfg->lut_params.flags; pwm_free(pwm_cfg->pwm_dev); pwm_cfg->lut_params.flags = lut_flags; ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) { pwm_cfg->lut_params.flags = previous_lut_flags; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new lut flags value\n"); return ret; } qpnp_led_set(&led->cdev, led->cdev.brightness); return count; } static ssize_t duty_pcts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; int num_duty_pcts = 0; struct led_classdev *led_cdev = dev_get_drvdata(dev); char *buffer; ssize_t ret; int i = 0; int max_duty_pcts; struct pwm_config_data *pwm_cfg; u32 previous_num_duty_pcts; int value; int *previous_duty_pcts; led = container_of(led_cdev, struct qpnp_led_data, cdev); switch (led->id) { case QPNP_ID_LED_MPP: pwm_cfg = led->mpp_cfg->pwm_cfg; 
max_duty_pcts = PWM_LUT_MAX_SIZE; break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: pwm_cfg = led->rgb_cfg->pwm_cfg; max_duty_pcts = PWM_LUT_MAX_SIZE; break; case QPNP_ID_KPDBL: pwm_cfg = led->kpdbl_cfg->pwm_cfg; max_duty_pcts = PWM_GPLED_LUT_MAX_SIZE; break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for duty pcts\n"); return -EINVAL; } if (pwm_cfg->mode == LPG_MODE) pwm_cfg->blinking = true; buffer = (char *)buf; for (i = 0; i < max_duty_pcts; i++) { if (buffer == NULL) break; ret = sscanf((const char *)buffer, "%u,%s", &value, buffer); pwm_cfg->old_duty_pcts[i] = value; num_duty_pcts++; if (ret <= 1) break; } if (num_duty_pcts >= max_duty_pcts) { dev_err(&led->spmi_dev->dev, "Number of duty pcts given exceeds max (%d)\n", max_duty_pcts); return -EINVAL; } previous_num_duty_pcts = pwm_cfg->duty_cycles->num_duty_pcts; previous_duty_pcts = pwm_cfg->duty_cycles->duty_pcts; pwm_cfg->duty_cycles->num_duty_pcts = num_duty_pcts; pwm_cfg->duty_cycles->duty_pcts = pwm_cfg->old_duty_pcts; pwm_cfg->old_duty_pcts = previous_duty_pcts; pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts; pwm_free(pwm_cfg->pwm_dev); ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (ret) goto restore; qpnp_led_set(&led->cdev, led->cdev.brightness); return count; restore: dev_err(&led->spmi_dev->dev, "Failed to initialize pwm with new duty pcts value\n"); pwm_cfg->duty_cycles->num_duty_pcts = previous_num_duty_pcts; pwm_cfg->old_duty_pcts = pwm_cfg->duty_cycles->duty_pcts; pwm_cfg->duty_cycles->duty_pcts = previous_duty_pcts; pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts; pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); qpnp_led_set(&led->cdev, led->cdev.brightness); return ret; } static void led_blink(struct qpnp_led_data *led, struct pwm_config_data *pwm_cfg) { int rc; flush_work(&led->work); mutex_lock(&led->lock); if (pwm_cfg->use_blink) { if (led->cdev.brightness) { 
pwm_cfg->blinking = true; if (led->id == QPNP_ID_LED_MPP) led->mpp_cfg->pwm_mode = LPG_MODE; else if (led->id == QPNP_ID_KPDBL) led->kpdbl_cfg->pwm_mode = LPG_MODE; pwm_cfg->mode = LPG_MODE; } else { pwm_cfg->blinking = false; pwm_cfg->mode = pwm_cfg->default_mode; if (led->id == QPNP_ID_LED_MPP) led->mpp_cfg->pwm_mode = pwm_cfg->default_mode; else if (led->id == QPNP_ID_KPDBL) led->kpdbl_cfg->pwm_mode = pwm_cfg->default_mode; } pwm_free(pwm_cfg->pwm_dev); qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name); if (led->id == QPNP_ID_RGB_RED || led->id == QPNP_ID_RGB_GREEN || led->id == QPNP_ID_RGB_BLUE) { rc = qpnp_rgb_set(led); if (rc < 0) dev_err(&led->spmi_dev->dev, "RGB set brightness failed (%d)\n", rc); } else if (led->id == QPNP_ID_LED_MPP) { rc = qpnp_mpp_set(led); if (rc < 0) dev_err(&led->spmi_dev->dev, "MPP set brightness failed (%d)\n", rc); } else if (led->id == QPNP_ID_KPDBL) { rc = qpnp_kpdbl_set(led); if (rc < 0) dev_err(&led->spmi_dev->dev, "KPDBL set brightness failed (%d)\n", rc); } } mutex_unlock(&led->lock); } static ssize_t blink_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qpnp_led_data *led; unsigned long blinking; struct led_classdev *led_cdev = dev_get_drvdata(dev); ssize_t ret = -EINVAL; ret = kstrtoul(buf, 10, &blinking); if (ret) return ret; led = container_of(led_cdev, struct qpnp_led_data, cdev); led->cdev.brightness = blinking ? 
led->cdev.max_brightness : 0; switch (led->id) { case QPNP_ID_LED_MPP: led_blink(led, led->mpp_cfg->pwm_cfg); break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: led_blink(led, led->rgb_cfg->pwm_cfg); break; case QPNP_ID_KPDBL: led_blink(led, led->kpdbl_cfg->pwm_cfg); break; default: dev_err(&led->spmi_dev->dev, "Invalid LED id type for blink\n"); return -EINVAL; } return count; } static DEVICE_ATTR(led_mode, 0664, NULL, led_mode_store); static DEVICE_ATTR(strobe, 0664, NULL, led_strobe_type_store); static DEVICE_ATTR(pwm_us, 0664, NULL, pwm_us_store); static DEVICE_ATTR(pause_lo, 0664, NULL, pause_lo_store); static DEVICE_ATTR(pause_hi, 0664, NULL, pause_hi_store); static DEVICE_ATTR(start_idx, 0664, NULL, start_idx_store); static DEVICE_ATTR(ramp_step_ms, 0664, NULL, ramp_step_ms_store); static DEVICE_ATTR(lut_flags, 0664, NULL, lut_flags_store); static DEVICE_ATTR(duty_pcts, 0664, NULL, duty_pcts_store); static DEVICE_ATTR(blink, 0664, NULL, blink_store); static struct attribute *led_attrs[] = { &dev_attr_led_mode.attr, &dev_attr_strobe.attr, NULL }; static const struct attribute_group led_attr_group = { .attrs = led_attrs, }; static struct attribute *pwm_attrs[] = { &dev_attr_pwm_us.attr, NULL }; static struct attribute *lpg_attrs[] = { &dev_attr_pause_lo.attr, &dev_attr_pause_hi.attr, &dev_attr_start_idx.attr, &dev_attr_ramp_step_ms.attr, &dev_attr_lut_flags.attr, &dev_attr_duty_pcts.attr, NULL }; static struct attribute *blink_attrs[] = { &dev_attr_blink.attr, NULL }; static const struct attribute_group pwm_attr_group = { .attrs = pwm_attrs, }; static const struct attribute_group lpg_attr_group = { .attrs = lpg_attrs, }; static const struct attribute_group blink_attr_group = { .attrs = blink_attrs, }; static int __devinit qpnp_flash_init(struct qpnp_led_data *led) { int rc; led->flash_cfg->flash_on = false; rc = qpnp_led_masked_write(led, FLASH_LED_STROBE_CTRL(led->base), FLASH_STROBE_MASK, FLASH_DISABLE_ALL); if (rc) { 
dev_err(&led->spmi_dev->dev, "LED %d flash write failed(%d)\n", led->id, rc); return rc; } /* Disable flash LED module */ rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base), FLASH_ENABLE_MASK, FLASH_DISABLE_ALL); if (rc) { dev_err(&led->spmi_dev->dev, "Enable reg write failed(%d)\n", rc); return rc; } if (led->flash_cfg->torch_enable) return 0; /* Set headroom */ rc = qpnp_led_masked_write(led, FLASH_HEADROOM(led->base), FLASH_HEADROOM_MASK, led->flash_cfg->headroom); if (rc) { dev_err(&led->spmi_dev->dev, "Headroom reg write failed(%d)\n", rc); return rc; } /* Set startup delay */ rc = qpnp_led_masked_write(led, FLASH_STARTUP_DELAY(led->base), FLASH_STARTUP_DLY_MASK, led->flash_cfg->startup_dly); if (rc) { dev_err(&led->spmi_dev->dev, "Startup delay reg write failed(%d)\n", rc); return rc; } /* Set timer control - safety or watchdog */ if (led->flash_cfg->safety_timer) { rc = qpnp_led_masked_write(led, FLASH_LED_TMR_CTRL(led->base), FLASH_TMR_MASK, FLASH_TMR_SAFETY); if (rc) { dev_err(&led->spmi_dev->dev, "LED timer ctrl reg write failed(%d)\n", rc); return rc; } } /* Set Vreg force */ rc = qpnp_led_masked_write(led, FLASH_VREG_OK_FORCE(led->base), FLASH_VREG_MASK, FLASH_HW_VREG_OK); if (rc) { dev_err(&led->spmi_dev->dev, "Vreg OK reg write failed(%d)\n", rc); return rc; } /* Set self fault check */ rc = qpnp_led_masked_write(led, FLASH_FAULT_DETECT(led->base), FLASH_FAULT_DETECT_MASK, FLASH_SELFCHECK_ENABLE); if (rc) { dev_err(&led->spmi_dev->dev, "Fault detect reg write failed(%d)\n", rc); return rc; } /* Set mask enable */ rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base), FLASH_MASK_REG_MASK, FLASH_MASK_1); if (rc) { dev_err(&led->spmi_dev->dev, "Mask enable reg write failed(%d)\n", rc); return rc; } /* Set current ramp */ rc = qpnp_led_masked_write(led, FLASH_CURRENT_RAMP(led->base), FLASH_CURRENT_RAMP_MASK, FLASH_RAMP_STEP_27US); if (rc) { dev_err(&led->spmi_dev->dev, "Current ramp reg write failed(%d)\n", rc); return rc; } #ifdef 
CONFIG_MACH_LGE /* Enable VPH_PWR_DROOP and set threshold to 2.9V (0xC2) */ rc = qpnp_led_masked_write(led, FLASH_VPH_PWR_DROOP(led->base), FLASH_VPH_PWR_DROOP_MASK, 0xC2); if (rc) { dev_err(&led->spmi_dev->dev, "FLASH_VPH_PWR_DROOP reg write failed(%d)\n", rc); return rc; } #endif led->flash_cfg->strobe_type = 0; /* dump flash registers */ qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs)); return 0; } static int __devinit qpnp_kpdbl_init(struct qpnp_led_data *led) { int rc; u8 val; /* workaround for GPLED pwm mode */ rc = qpnp_led_masked_write(led, 0xE2B1, 0xFF, 0x80); if (rc) { dev_err(&led->spmi_dev->dev, "workaround for GPLED pwm mode111(%d)\n", rc); return rc; } /* workaround for GPLED pwm mode */ rc = qpnp_led_masked_write(led, 0xE2B4, 0xFF, 0x04); if (rc) { dev_err(&led->spmi_dev->dev, "workaround for GPLED pwm mode222(%d)\n", rc); return rc; } /* select row source - vbst or vph */ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, KPDBL_ROW_SRC_SEL(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=%x, rc(%d)\n", KPDBL_ROW_SRC_SEL(led->base), rc); return rc; } if (led->kpdbl_cfg->row_src_vbst) val |= 1 << led->kpdbl_cfg->row_id; else val &= ~(1 << led->kpdbl_cfg->row_id); rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, KPDBL_ROW_SRC_SEL(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=%x, rc(%d)\n", KPDBL_ROW_SRC_SEL(led->base), rc); return rc; } /* row source enable */ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, KPDBL_ROW_SRC(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=%x, rc(%d)\n", KPDBL_ROW_SRC(led->base), rc); return rc; } if (led->kpdbl_cfg->row_src_en) val |= KPDBL_ROW_SCAN_EN_MASK | (1 << led->kpdbl_cfg->row_id); else val &= ~(1 << led->kpdbl_cfg->row_id); rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid, 
KPDBL_ROW_SRC(led->base), &val, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to write to addr=%x, rc(%d)\n", KPDBL_ROW_SRC(led->base), rc); return rc; } #ifndef CONFIG_LEDS_PM8941_EMOTIONAL /* enable module */ rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base), KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN); if (rc) { dev_err(&led->spmi_dev->dev, "Enable module write failed(%d)\n", rc); return rc; } #endif rc = qpnp_pwm_init(led->kpdbl_cfg->pwm_cfg, led->spmi_dev, led->cdev.name); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to initialize pwm\n"); return rc; } if (led->kpdbl_cfg->always_on) { kpdbl_master = led->kpdbl_cfg->pwm_cfg->pwm_dev; kpdbl_master_period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us; } /* dump kpdbl registers */ qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs)); return 0; } static int __devinit qpnp_rgb_init(struct qpnp_led_data *led) { int rc; rc = qpnp_led_masked_write(led, RGB_LED_SRC_SEL(led->base), RGB_LED_SRC_MASK, RGB_LED_SOURCE_VPH_PWR); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led source select register\n"); return rc; } rc = qpnp_pwm_init(led->rgb_cfg->pwm_cfg, led->spmi_dev, led->cdev.name); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to initialize pwm\n"); return rc; } /* Initialize led for use in auto trickle charging mode */ rc = qpnp_led_masked_write(led, RGB_LED_ATC_CTL(led->base), led->rgb_cfg->enable, led->rgb_cfg->enable); return 0; } static int __devinit qpnp_mpp_init(struct qpnp_led_data *led) { int rc; u8 val; if (led->max_current < LED_MPP_CURRENT_MIN || led->max_current > LED_MPP_CURRENT_MAX) { dev_err(&led->spmi_dev->dev, "max current for mpp is not valid\n"); return -EINVAL; } val = (led->mpp_cfg->current_setting / LED_MPP_CURRENT_PER_SETTING) - 1; if (val < 0) val = 0; rc = qpnp_led_masked_write(led, LED_MPP_VIN_CTRL(led->base), LED_MPP_VIN_MASK, led->mpp_cfg->vin_ctrl); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write led vin control reg\n"); return rc; } rc = 
qpnp_led_masked_write(led, LED_MPP_SINK_CTRL(led->base), LED_MPP_SINK_MASK, val); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to write sink control reg\n"); return rc; } if (led->mpp_cfg->pwm_mode != MANUAL_MODE) { rc = qpnp_pwm_init(led->mpp_cfg->pwm_cfg, led->spmi_dev, led->cdev.name); if (rc) { dev_err(&led->spmi_dev->dev, "Failed to initialize pwm\n"); return rc; } } return 0; } static int __devinit qpnp_led_initialize(struct qpnp_led_data *led) { int rc = 0; switch (led->id) { case QPNP_ID_WLED: rc = qpnp_wled_init(led); if (rc) dev_err(&led->spmi_dev->dev, "WLED initialize failed(%d)\n", rc); break; case QPNP_ID_FLASH1_LED0: case QPNP_ID_FLASH1_LED1: rc = qpnp_flash_init(led); if (rc) dev_err(&led->spmi_dev->dev, "FLASH initialize failed(%d)\n", rc); break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: rc = qpnp_rgb_init(led); if (rc) dev_err(&led->spmi_dev->dev, "RGB initialize failed(%d)\n", rc); break; case QPNP_ID_LED_MPP: rc = qpnp_mpp_init(led); if (rc) dev_err(&led->spmi_dev->dev, "MPP initialize failed(%d)\n", rc); break; case QPNP_ID_KPDBL: rc = qpnp_kpdbl_init(led); if (rc) dev_err(&led->spmi_dev->dev, "KPDBL initialize failed(%d)\n", rc); break; default: dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id); return -EINVAL; } return rc; } static int __devinit qpnp_get_common_configs(struct qpnp_led_data *led, struct device_node *node) { int rc; u32 val; const char *temp_string; led->cdev.default_trigger = LED_TRIGGER_DEFAULT; rc = of_property_read_string(node, "linux,default-trigger", &temp_string); if (!rc) led->cdev.default_trigger = temp_string; else if (rc != -EINVAL) return rc; led->default_on = false; rc = of_property_read_string(node, "qcom,default-state", &temp_string); if (!rc) { if (strncmp(temp_string, "on", sizeof("on")) == 0) led->default_on = true; } else if (rc != -EINVAL) return rc; led->turn_off_delay_ms = 0; rc = of_property_read_u32(node, "qcom,turn-off-delay-ms", &val); if (!rc) 
led->turn_off_delay_ms = val;
	else if (rc != -EINVAL)
		return rc;

	return 0;
}

/*
 * Handlers for alternative sources of platform_data
 */

/*
 * qpnp_get_config_wled() - allocate and fill wled_config_data from the
 * device tree.  Every tunable falls back to its WLED_*_DEFAULT when
 * the property is absent; only unexpected OF errors abort probing.
 */
static int __devinit qpnp_get_config_wled(struct qpnp_led_data *led,
				struct device_node *node)
{
	u32 val;
	int rc;

	led->wled_cfg = devm_kzalloc(&led->spmi_dev->dev,
				sizeof(struct wled_config_data), GFP_KERNEL);
	if (!led->wled_cfg) {
		dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): a failed PMIC version read is only logged;
	 * pmic_version then stays 0 from kzalloc - confirm this is the
	 * intended fallback.
	 */
	rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
		PMIC_VERSION_REG, &led->wled_cfg->pmic_version, 1);
	if (rc) {
		dev_err(&led->spmi_dev->dev,
			"Unable to read pmic ver, rc(%d)\n", rc);
	}

	led->wled_cfg->num_strings = WLED_DEFAULT_STRINGS;
	rc = of_property_read_u32(node, "qcom,num-strings", &val);
	if (!rc)
		led->wled_cfg->num_strings = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->ovp_val = WLED_DEFAULT_OVP_VAL;
	rc = of_property_read_u32(node, "qcom,ovp-val", &val);
	if (!rc)
		led->wled_cfg->ovp_val = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->boost_curr_lim = WLED_BOOST_LIM_DEFAULT;
	rc = of_property_read_u32(node, "qcom,boost-curr-lim", &val);
	if (!rc)
		led->wled_cfg->boost_curr_lim = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->cp_select = WLED_CP_SEL_DEFAULT;
	rc = of_property_read_u32(node, "qcom,cp-sel", &val);
	if (!rc)
		led->wled_cfg->cp_select = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->ctrl_delay_us = WLED_CTRL_DLY_DEFAULT;
	rc = of_property_read_u32(node, "qcom,ctrl-delay-us", &val);
	if (!rc)
		led->wled_cfg->ctrl_delay_us = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->op_fdbck = WLED_OP_FDBCK_DEFAULT;
	rc = of_property_read_u32(node, "qcom,op-fdbck", &val);
	if (!rc)
		led->wled_cfg->op_fdbck = (u8) val;
	else if (rc != -EINVAL)
		return rc;

	led->wled_cfg->switch_freq = WLED_SWITCH_FREQ_DEFAULT;
	rc = of_property_read_u32(node, "qcom,switch-freq", &val);
	if (!rc)
		led->wled_cfg->switch_freq = (u8) val;
	else if (rc != -EINVAL)
		return rc;
led->wled_cfg->dig_mod_gen_en = of_property_read_bool(node, "qcom,dig-mod-gen-en"); led->wled_cfg->cs_out_en = of_property_read_bool(node, "qcom,cs-out-en"); return 0; } static int __devinit qpnp_get_config_flash(struct qpnp_led_data *led, struct device_node *node, bool *reg_set) { int rc; u32 val; led->flash_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct flash_config_data), GFP_KERNEL); if (!led->flash_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid, FLASH_PERIPHERAL_SUBTYPE(led->base), &led->flash_cfg->peripheral_subtype, 1); if (rc) { dev_err(&led->spmi_dev->dev, "Unable to read from addr=%x, rc(%d)\n", FLASH_PERIPHERAL_SUBTYPE(led->base), rc); } led->flash_cfg->torch_enable = of_property_read_bool(node, "qcom,torch-enable"); if (of_find_property(of_get_parent(node), "flash-wa-supply", NULL) && (!*reg_set)) { led->flash_cfg->flash_wa_reg = devm_regulator_get(&led->spmi_dev->dev, "flash-wa"); if (IS_ERR_OR_NULL(led->flash_cfg->flash_wa_reg)) { rc = PTR_ERR(led->flash_cfg->flash_wa_reg); if (rc != EPROBE_DEFER) { dev_err(&led->spmi_dev->dev, "Flash wa regulator get failed(%d)\n", rc); } } else led->flash_cfg->flash_wa_reg_get = true; } if (led->id == QPNP_ID_FLASH1_LED0) { led->flash_cfg->enable_module = FLASH_ENABLE_LED_0; led->flash_cfg->current_addr = FLASH_LED_0_CURR(led->base); led->flash_cfg->trigger_flash = FLASH_LED_0_OUTPUT; if (!*reg_set) { led->flash_cfg->flash_boost_reg = regulator_get(&led->spmi_dev->dev, "flash-boost"); if (IS_ERR(led->flash_cfg->flash_boost_reg)) { rc = PTR_ERR(led->flash_cfg->flash_boost_reg); dev_err(&led->spmi_dev->dev, "Regulator get failed(%d)\n", rc); goto error_get_flash_reg; } led->flash_cfg->flash_reg_get = true; *reg_set = true; } else led->flash_cfg->flash_reg_get = false; if (led->flash_cfg->torch_enable) { led->flash_cfg->second_addr = FLASH_LED_1_CURR(led->base); } } else if (led->id == QPNP_ID_FLASH1_LED1) 
{
		led->flash_cfg->enable_module = FLASH_ENABLE_LED_1;
		led->flash_cfg->current_addr = FLASH_LED_1_CURR(led->base);
		led->flash_cfg->trigger_flash = FLASH_LED_1_OUTPUT;
		if (!*reg_set) {
			led->flash_cfg->flash_boost_reg =
				regulator_get(&led->spmi_dev->dev,
							"flash-boost");
			if (IS_ERR(led->flash_cfg->flash_boost_reg)) {
				rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
				dev_err(&led->spmi_dev->dev,
					"Regulator get failed(%d)\n", rc);
				goto error_get_flash_reg;
			}
			led->flash_cfg->flash_reg_get = true;
			*reg_set = true;
		} else
			led->flash_cfg->flash_reg_get = false;

		if (led->flash_cfg->torch_enable) {
			/* torch mode also programs the sibling's current reg */
			led->flash_cfg->second_addr =
				FLASH_LED_0_CURR(led->base);
		}
	} else {
		dev_err(&led->spmi_dev->dev, "Unknown flash LED name given\n");
		return -EINVAL;
	}

	if (led->flash_cfg->torch_enable) {
		/* a dedicated torch boost supply is optional */
		if (of_find_property(of_get_parent(node), "torch-boost-supply",
									NULL)) {
			led->flash_cfg->torch_boost_reg =
				regulator_get(&led->spmi_dev->dev,
								"torch-boost");
			if (IS_ERR(led->flash_cfg->torch_boost_reg)) {
				rc = PTR_ERR(led->flash_cfg->torch_boost_reg);
				dev_err(&led->spmi_dev->dev,
					"Torch regulator get failed(%d)\n",
					rc);
				goto error_get_torch_reg;
			}
			led->flash_cfg->enable_module = FLASH_ENABLE_MODULE;
		} else
			led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
		led->flash_cfg->trigger_flash = FLASH_TORCH_OUTPUT;
	}

	/* scale the DT current into the hardware's register range */
	rc = of_property_read_u32(node, "qcom,current", &val);
	if (!rc) {
		if (led->flash_cfg->torch_enable) {
			led->flash_cfg->current_prgm = (val *
				TORCH_MAX_LEVEL / led->max_current);
			/* torch mode skips the remaining flash tunables */
			return 0;
		} else
			led->flash_cfg->current_prgm = (val *
				FLASH_MAX_LEVEL / led->max_current);
	} else if (led->flash_cfg->torch_enable)
		/* qcom,current is mandatory */
		goto error_get_torch_reg;
	else
		goto error_get_flash_reg;

	rc = of_property_read_u32(node, "qcom,headroom", &val);
	if (!rc)
		led->flash_cfg->headroom = (u8) val;
	else if (rc == -EINVAL)
		led->flash_cfg->headroom = HEADROOM_500mV;
	else
		goto error_get_flash_reg;

	rc = of_property_read_u32(node, "qcom,duration", &val);
	if (!rc)
		/* register units: 10ms steps starting at 10ms */
		led->flash_cfg->duration = (u8)((val - 10) / 10);
	else if (rc == -EINVAL)
led->flash_cfg->duration = FLASH_DURATION_200ms;
	else
		goto error_get_flash_reg;

	rc = of_property_read_u32(node, "qcom,clamp-curr", &val);
	if (!rc)
		led->flash_cfg->clamp_curr = (val *
				FLASH_MAX_LEVEL / led->max_current);
	else if (rc == -EINVAL)
		led->flash_cfg->clamp_curr = FLASH_CLAMP_200mA;
	else
		goto error_get_flash_reg;

	rc = of_property_read_u32(node, "qcom,startup-dly", &val);
	if (!rc)
		led->flash_cfg->startup_dly = (u8) val;
	else if (rc == -EINVAL)
		led->flash_cfg->startup_dly = DELAY_128us;
	else
		goto error_get_flash_reg;

	led->flash_cfg->safety_timer =
		of_property_read_bool(node, "qcom,safety-timer");

	return 0;

	/*
	 * NOTE(review): the torch label falls through and also puts the
	 * flash regulator, and either label may run before the matching
	 * regulator_get() succeeded - confirm regulator_put() on those
	 * paths is safe here.
	 */
error_get_torch_reg:
	regulator_put(led->flash_cfg->torch_boost_reg);

error_get_flash_reg:
	regulator_put(led->flash_cfg->flash_boost_reg);
	return rc;
}

/*
 * qpnp_get_config_pwm() - parse the PWM/LPG portion of an LED node.
 * Fills pwm_cfg with channel, period and - for LPG or blink use - the
 * duty-cycle LUT and its ramp parameters.  Bad LPG parameters fall
 * back to plain PWM mode (see bad_lpg_params).
 */
static int __devinit qpnp_get_config_pwm(struct pwm_config_data *pwm_cfg,
				struct spmi_device *spmi_dev,
				struct device_node *node)
{
	struct property *prop;
	int rc, i, lut_max_size;
	u32 val;
	u8 *temp_cfg;
	const char *led_label;

	rc = of_property_read_u32(node, "qcom,pwm-channel", &val);
	if (!rc)
		pwm_cfg->pwm_channel = val;
	else
		return rc;

#ifndef CONFIG_MACH_LGE
	if (pwm_cfg->mode != MANUAL_MODE) {
#endif
		/* on LGE builds the period is required in every mode */
		rc = of_property_read_u32(node, "qcom,pwm-us", &val);
		if (!rc)
			pwm_cfg->pwm_period_us = val;
		else
			return rc;
#ifndef CONFIG_MACH_LGE
	}
#endif

	pwm_cfg->use_blink =
		of_property_read_bool(node, "qcom,use-blink");

	if (pwm_cfg->mode == LPG_MODE || pwm_cfg->use_blink) {
		pwm_cfg->duty_cycles =
			devm_kzalloc(&spmi_dev->dev,
			sizeof(struct pwm_duty_cycles), GFP_KERNEL);
		if (!pwm_cfg->duty_cycles) {
			dev_err(&spmi_dev->dev,
				"Unable to allocate memory\n");
			rc = -ENOMEM;
			goto bad_lpg_params;
		}

#ifdef CONFIG_LEDS_PM8941_EMOTIONAL
		/* wire up the statically defined LGE pattern LUT tables */
		pwm_cfg->duty_cycles->num_duty_pcts =
			leds_pwm_duty_cycles.num_duty_pcts;
		pwm_cfg->duty_cycles->duty_pcts0 =
			leds_pwm_duty_cycles.duty_pcts0;
		pwm_cfg->duty_cycles->duty_pcts1 =
			leds_pwm_duty_cycles.duty_pcts1;
		pwm_cfg->duty_cycles->duty_pcts2 =
			leds_pwm_duty_cycles.duty_pcts2;
pwm_cfg->duty_cycles->duty_pcts3 =
			leds_pwm_duty_cycles.duty_pcts3;
		/* each duty_pctsN table corresponds to one LGE pattern id */
		pwm_cfg->duty_cycles->duty_pcts4 =
			leds_pwm_duty_cycles.duty_pcts4;
		pwm_cfg->duty_cycles->duty_pcts5 =
			leds_pwm_duty_cycles.duty_pcts5;
		pwm_cfg->duty_cycles->duty_pcts6 =
			leds_pwm_duty_cycles.duty_pcts6;
		pwm_cfg->duty_cycles->duty_pcts7 =
			leds_pwm_duty_cycles.duty_pcts7;
		pwm_cfg->duty_cycles->duty_pcts8 =
			leds_pwm_duty_cycles.duty_pcts8;
		pwm_cfg->duty_cycles->duty_pcts12 =
			leds_pwm_duty_cycles.duty_pcts12;
		pwm_cfg->duty_cycles->duty_pcts13 =
			leds_pwm_duty_cycles.duty_pcts13;
		pwm_cfg->duty_cycles->duty_pcts14 =
			leds_pwm_duty_cycles.duty_pcts14;
		pwm_cfg->duty_cycles->duty_pcts17 =
			leds_pwm_duty_cycles.duty_pcts17;
		pwm_cfg->duty_cycles->duty_pcts18 =
			leds_pwm_duty_cycles.duty_pcts18;
		pwm_cfg->duty_cycles->duty_pcts19 =
			leds_pwm_duty_cycles.duty_pcts19;
		pwm_cfg->duty_cycles->duty_pcts20 =
			leds_pwm_duty_cycles.duty_pcts20;
		pwm_cfg->duty_cycles->duty_pcts29 =
			leds_pwm_duty_cycles.duty_pcts29;
		pwm_cfg->duty_cycles->duty_pcts30 =
			leds_pwm_duty_cycles.duty_pcts30;
		pwm_cfg->duty_cycles->duty_pcts31 =
			leds_pwm_duty_cycles.duty_pcts31;
		pwm_cfg->duty_cycles->duty_pcts32 =
			leds_pwm_duty_cycles.duty_pcts32;
		pwm_cfg->duty_cycles->duty_pcts37 =
			leds_pwm_duty_cycles.duty_pcts37;
		pwm_cfg->duty_cycles->duty_pcts39 =
			leds_pwm_duty_cycles.duty_pcts39;
		pwm_cfg->duty_cycles->duty_pcts40 =
			leds_pwm_duty_cycles.duty_pcts40;
		pwm_cfg->duty_cycles->duty_pcts41 =
			leds_pwm_duty_cycles.duty_pcts41;
		pwm_cfg->duty_cycles->duty_pcts42 =
			leds_pwm_duty_cycles.duty_pcts42;
		pwm_cfg->duty_cycles->duty_pcts43 =
			leds_pwm_duty_cycles.duty_pcts43;
		pwm_cfg->duty_cycles->duty_pcts44 =
			leds_pwm_duty_cycles.duty_pcts44;
		pwm_cfg->duty_cycles->duty_pcts45 =
			leds_pwm_duty_cycles.duty_pcts45;
		pwm_cfg->duty_cycles->duty_pcts46 =
			leds_pwm_duty_cycles.duty_pcts46;
		pwm_cfg->duty_cycles->duty_pcts47 =
			leds_pwm_duty_cycles.duty_pcts47;
		pwm_cfg->duty_cycles->duty_pcts101 =
			leds_pwm_duty_cycles.duty_pcts101;
		pwm_cfg->duty_cycles->duty_pcts102 =
leds_pwm_duty_cycles.duty_pcts102;
#endif

		prop = of_find_property(node, "qcom,duty-pcts",
			&pwm_cfg->duty_cycles->num_duty_pcts);
		if (!prop) {
			dev_err(&spmi_dev->dev, "Looking up property " \
				"node qcom,duty-pcts failed\n");
			rc = -ENODEV;
			goto bad_lpg_params;
		} else if (!pwm_cfg->duty_cycles->num_duty_pcts) {
			dev_err(&spmi_dev->dev, "Invalid length of " \
				"duty pcts\n");
			rc = -EINVAL;
			goto bad_lpg_params;
		}

		rc = of_property_read_string(node, "label", &led_label);
		if (rc < 0) {
			dev_err(&spmi_dev->dev,
				"Failure reading label, rc = %d\n", rc);
			return rc;
		}

		/* kpdbl rows use the smaller GPLED LUT */
		if (strcmp(led_label, "kpdbl") == 0)
			lut_max_size = PWM_GPLED_LUT_MAX_SIZE;
		else
			lut_max_size = PWM_LUT_MAX_SIZE;

		pwm_cfg->duty_cycles->duty_pcts =
			devm_kzalloc(&spmi_dev->dev,
			sizeof(int) * lut_max_size,
			GFP_KERNEL);
		if (!pwm_cfg->duty_cycles->duty_pcts) {
			dev_err(&spmi_dev->dev,
				"Unable to allocate memory\n");
			rc = -ENOMEM;
			goto bad_lpg_params;
		}

		pwm_cfg->old_duty_pcts =
			devm_kzalloc(&spmi_dev->dev,
			sizeof(int) * lut_max_size,
			GFP_KERNEL);
		if (!pwm_cfg->old_duty_pcts) {
			dev_err(&spmi_dev->dev,
				"Unable to allocate memory\n");
			rc = -ENOMEM;
			goto bad_lpg_params;
		}

		temp_cfg = devm_kzalloc(&spmi_dev->dev,
			pwm_cfg->duty_cycles->num_duty_pcts *
			sizeof(u8), GFP_KERNEL);
		if (!temp_cfg) {
			dev_err(&spmi_dev->dev, "Failed to allocate " \
				"memory for duty pcts\n");
			rc = -ENOMEM;
			goto bad_lpg_params;
		}

		/* DT stores the duty table as bytes; widen each to int */
		memcpy(temp_cfg, prop->value,
			pwm_cfg->duty_cycles->num_duty_pcts);

		for (i = 0; i < pwm_cfg->duty_cycles->num_duty_pcts; i++)
			pwm_cfg->duty_cycles->duty_pcts[i] =
				(int) temp_cfg[i];

		rc = of_property_read_u32(node, "qcom,start-idx", &val);
		if (!rc) {
			pwm_cfg->lut_params.start_idx = val;
			pwm_cfg->duty_cycles->start_idx = val;
		} else
			goto bad_lpg_params;

		pwm_cfg->lut_params.lut_pause_hi = 0;
		rc = of_property_read_u32(node, "qcom,pause-hi", &val);
		if (!rc)
			pwm_cfg->lut_params.lut_pause_hi = val;
		else if (rc != -EINVAL)
			goto bad_lpg_params;

		pwm_cfg->lut_params.lut_pause_lo = 0;
		rc = of_property_read_u32(node, "qcom,pause-lo", &val);
		if
(!rc) pwm_cfg->lut_params.lut_pause_lo = val; else if (rc != -EINVAL) goto bad_lpg_params; pwm_cfg->lut_params.ramp_step_ms = QPNP_LUT_RAMP_STEP_DEFAULT; rc = of_property_read_u32(node, "qcom,ramp-step-ms", &val); if (!rc) pwm_cfg->lut_params.ramp_step_ms = val; else if (rc != -EINVAL) goto bad_lpg_params; pwm_cfg->lut_params.flags = QPNP_LED_PWM_FLAGS; rc = of_property_read_u32(node, "qcom,lut-flags", &val); if (!rc) pwm_cfg->lut_params.flags = (u8) val; else if (rc != -EINVAL) goto bad_lpg_params; pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts; } return 0; bad_lpg_params: pwm_cfg->use_blink = false; if (pwm_cfg->mode == PWM_MODE) { dev_err(&spmi_dev->dev, "LPG parameters not set for" \ " blink mode, defaulting to PWM mode\n"); return 0; } return rc; }; static int qpnp_led_get_mode(const char *mode) { if (strncmp(mode, "manual", strlen(mode)) == 0) return MANUAL_MODE; else if (strncmp(mode, "pwm", strlen(mode)) == 0) return PWM_MODE; else if (strncmp(mode, "lpg", strlen(mode)) == 0) return LPG_MODE; else return -EINVAL; }; static int __devinit qpnp_get_config_kpdbl(struct qpnp_led_data *led, struct device_node *node) { int rc; u32 val; u8 led_mode; const char *mode; led->kpdbl_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct kpdbl_config_data), GFP_KERNEL); if (!led->kpdbl_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } rc = of_property_read_string(node, "qcom,mode", &mode); if (!rc) { led_mode = qpnp_led_get_mode(mode); if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) { dev_err(&led->spmi_dev->dev, "Selected mode not " \ "supported for kpdbl.\n"); return -EINVAL; } led->kpdbl_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct pwm_config_data), GFP_KERNEL); if (!led->kpdbl_cfg->pwm_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } led->kpdbl_cfg->pwm_cfg->mode = led_mode; led->kpdbl_cfg->pwm_cfg->default_mode = led_mode; } else return rc; rc = 
qpnp_get_config_pwm(led->kpdbl_cfg->pwm_cfg, led->spmi_dev, node); if (rc < 0) return rc; rc = of_property_read_u32(node, "qcom,row-id", &val); if (!rc) led->kpdbl_cfg->row_id = val; else return rc; led->kpdbl_cfg->row_src_vbst = of_property_read_bool(node, "qcom,row-src-vbst"); led->kpdbl_cfg->row_src_en = of_property_read_bool(node, "qcom,row-src-en"); led->kpdbl_cfg->always_on = of_property_read_bool(node, "qcom,always-on"); #ifdef CONFIG_LEDS_PM8941_EMOTIONAL led->kpdbl_cfg->pwm_cfg->mode = led->kpdbl_cfg->pwm_cfg->default_mode; if (led->kpdbl_cfg->pwm_cfg->pwm_channel == 8) kpdbl_lpg1 = led; else if (led->kpdbl_cfg->pwm_cfg->pwm_channel == 9) kpdbl_lpg2 = led; #endif return 0; } static int __devinit qpnp_get_config_rgb(struct qpnp_led_data *led, struct device_node *node) { int rc; u8 led_mode; const char *mode; led->rgb_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct rgb_config_data), GFP_KERNEL); if (!led->rgb_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } if (led->id == QPNP_ID_RGB_RED) led->rgb_cfg->enable = RGB_LED_ENABLE_RED; else if (led->id == QPNP_ID_RGB_GREEN) led->rgb_cfg->enable = RGB_LED_ENABLE_GREEN; else if (led->id == QPNP_ID_RGB_BLUE) led->rgb_cfg->enable = RGB_LED_ENABLE_BLUE; else return -EINVAL; rc = of_property_read_string(node, "qcom,mode", &mode); if (!rc) { led_mode = qpnp_led_get_mode(mode); if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) { dev_err(&led->spmi_dev->dev, "Selected mode not " \ "supported for rgb.\n"); return -EINVAL; } led->rgb_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct pwm_config_data), GFP_KERNEL); if (!led->rgb_cfg->pwm_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } led->rgb_cfg->pwm_cfg->mode = led_mode; led->rgb_cfg->pwm_cfg->default_mode = led_mode; } else return rc; rc = qpnp_get_config_pwm(led->rgb_cfg->pwm_cfg, led->spmi_dev, node); if (rc < 0) return rc; #ifdef CONFIG_LEDS_PM8941_EMOTIONAL if (led->id 
== QPNP_ID_RGB_RED)
		red_led = led;
	else if (led->id == QPNP_ID_RGB_GREEN)
		green_led = led;
	else if (led->id == QPNP_ID_RGB_BLUE)
		blue_led = led;
#endif

	return 0;
}

#ifdef CONFIG_LEDS_PM8941_EMOTIONAL
/* For pattern brightness tunning */
/*
 * Scratch LUT: entries 0..62 are duty values (scaled below by
 * BRIGHTNESS_TUNNING); entries 63..78 carry per-color start/length,
 * on/off markers, pause and ramp metadata consumed by
 * change_led_pattern().
 */
int leds_pwm_duty_pcts_brightness_tunning[79] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0,
	1, 1, 0, 0,
	1, 1, 0, 0,
	1, 1, 0, 0,
	0, 0, 0, 65
};

/*
 * change_led_pattern() - switch the RGB LEDs to one of the canned LGE
 * LUT patterns.  Relies on red_led/green_led/blue_led having been
 * cached by qpnp_get_config_rgb().
 */
void change_led_pattern(int pattern)
{
	int *duty_pcts_red = NULL;
	int *duty_pcts_green = NULL;
	int *duty_pcts_blue = NULL;
	struct lut_params rgb_lut_params;
	int i;

	/* 1. set all leds brightness to 0 */
	red_led->cdev.brightness = 0;
	green_led->cdev.brightness = 0;
	blue_led->cdev.brightness = 0;

	/* 2. run work-function, as brightness 0, all led turn off
	 * qpnp_rgb_set(red_led);
	 * qpnp_rgb_set(green_led);
	 * qpnp_rgb_set(blue_led);
	 */

	/* 3. change LUT structure in platform device.
*/
	/* select the per-pattern duty tables for all three colors */
	switch (pattern) {
	case 0:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;
		break;
	case 1:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts1;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts1;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts1;
		break;
	case 2:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts2;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts2;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts2;
		break;
	case 3:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts3;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts3;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts3;
		break;
	case 4:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts4;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts4;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts4;
		break;
	case 5:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts5;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts5;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts5;
		break;
	case 6:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts6;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts6;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts6;
		break;
	case 7:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts7;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts7;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts7;
		break;
	case 8:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts8;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts8;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts8;
		break;
	case 12:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts12;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts12;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts12;
		break;
	case 13:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts13;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts13;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts13;
		break;
	case 14:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts14;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts14;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts14;
		break;
	case 17:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts17;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts17;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts17;
		break;
	case 18:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts18;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts18;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts18;
		break;
	case 19:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts19;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts19;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts19;
		break;
	case 20:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts20;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts20;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts20;
		break;
	case 29:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts29;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts29;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts29;
		break;
	case 30:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts30;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts30;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts30;
		break;
	case 31:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts31;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts31;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts31;
		break;
	case 32:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts32;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts32;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts32;
		break;
	case 37:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts37;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts37;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts37;
		break;
	case 39:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts39;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts39;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts39;
		break;
	case 40:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts40;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts40;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts40;
		break;
	case 41:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts41;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts41;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts41;
		break;
	case 42:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts42;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts42;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts42;
		break;
	case 43:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts43;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts43;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts43;
		break;
	case 44:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts44;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts44;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts44;
		break;
	case 45:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts45;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts45;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts45;
		break;
	case 46:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts46;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts46;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts46;
		break;
	case 47:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts47;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts47;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts47;
		break;
	case 101:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts101;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts101;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts101;
		break;
	case 102:
		duty_pcts_red =
			red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts102;
		duty_pcts_green =
			green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts102;
		duty_pcts_blue =
			blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts102;
		break;
	default:
		/* unknown pattern id: leave the LEDs untouched */
		return;
	}

	if (duty_pcts_red == NULL)
		pr_err("%s duty_pcts_red = NULL \n", __func__);
	if (duty_pcts_green == NULL)
		pr_err("%s duty_pcts_green = NULL \n", __func__);
	if (duty_pcts_blue == NULL)
		pr_err("%s duty_pcts_blue = NULL \n", __func__);
	if ( duty_pcts_red == NULL || duty_pcts_green == NULL ||
			duty_pcts_blue == NULL)
		return;

	/* brightness tunning */
	/* entries 0..62 are duty values and get scaled; 63..78 are metadata */
	for (i = 0; i < 79; i++) {
		if (i >= 0 && i <= 62) {
			leds_pwm_duty_pcts_brightness_tunning[i] =
				duty_pcts_red[i] * BRIGHTNESS_TUNNING;
		} else {
			leds_pwm_duty_pcts_brightness_tunning[i] =
				duty_pcts_red[i];
		}
	}

	/* all three colors now share the scaled scratch table */
	duty_pcts_red = leds_pwm_duty_pcts_brightness_tunning;
	duty_pcts_green =
leds_pwm_duty_pcts_brightness_tunning;
	duty_pcts_blue = leds_pwm_duty_pcts_brightness_tunning;

	/* 4. lut disable, so we can edit LUT table after done this. */
	pwm_disable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	/* 5. lut config(red led). */
	/* metadata layout: [63] start, [64] len, [66] pause-hi, [75] flags */
	rgb_lut_params.start_idx = duty_pcts_red[63];
	rgb_lut_params.idx_len = duty_pcts_red[64];
	rgb_lut_params.lut_pause_hi = duty_pcts_red[66];
	rgb_lut_params.ramp_step_ms = duty_pcts_red[78];
	rgb_lut_params.flags = duty_pcts_red[75];
	if (rgb_lut_params.flags & 0x10) {
		/* for first missed noti delay */
		rgb_lut_params.lut_pause_lo = 1000;
	}
	/*
	 * NOTE(review): lut_pause_lo is left uninitialized when the 0x10
	 * flag is clear, and the green/blue configs below reuse whatever
	 * the red setup left in the struct - confirm this is intended.
	 */
	pwm_lut_config(red_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[duty_pcts_red[63]], rgb_lut_params);

	/* 6. lut config(green led). */
	rgb_lut_params.start_idx = duty_pcts_red[67];
	rgb_lut_params.idx_len = duty_pcts_red[68];
	rgb_lut_params.lut_pause_hi = duty_pcts_red[70];
	rgb_lut_params.flags = duty_pcts_red[76];
	pwm_lut_config(green_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[duty_pcts_red[67]], rgb_lut_params);

	/* 7. lut config(blue led). */
	rgb_lut_params.start_idx = duty_pcts_blue[71];
	rgb_lut_params.idx_len = duty_pcts_blue[72];
	rgb_lut_params.lut_pause_hi = duty_pcts_blue[74];
	rgb_lut_params.flags = duty_pcts_blue[77];
	pwm_lut_config(blue_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[duty_pcts_red[71]], rgb_lut_params);

	/* 8. lut enable, so we can run led after done this. */
	pwm_enable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	/* 9. set leds brightness to 255 */
	red_led->cdev.brightness = 255;
	green_led->cdev.brightness = 255;
	blue_led->cdev.brightness = 255;

	/* [65]/[69]/[73] mark whether each color takes part in the pattern */
	if (duty_pcts_red[65] == 0)
		red_led->cdev.brightness = 0;
	if (duty_pcts_red[69] == 0)
		green_led->cdev.brightness = 0;
	if (duty_pcts_red[73] == 0)
		blue_led->cdev.brightness = 0;

	/* 10.
run work-function, as brightness 255, all led turn on */
	qpnp_rgb_set(red_led);
	qpnp_rgb_set(green_led);
	qpnp_rgb_set(blue_led);
}

/*
 * make_input_led_pattern() - build a one-off pattern from a caller
 * supplied 63-entry duty table plus per-color start/length/duty/pause
 * and flag values, then program and kick all three LPG channels.
 */
void make_input_led_pattern(int patterns[],
	int red_start, int red_length, int red_duty, int red_pause,
	int green_start, int green_length, int green_duty, int green_pause,
	int blue_start, int blue_length, int blue_duty, int blue_pause,
	int red_flag, int green_flag, int blue_flag, int period)
{
	int *duty_pcts_red = NULL;
	int *duty_pcts_green = NULL;
	int *duty_pcts_blue = NULL;
	int input_patterns[79];
	int i;
	struct lut_params rgb_lut_params;

	/* pack the caller's values into the 79-slot pattern layout */
	for (i = 0; i < 63; i++)
		input_patterns[i] = patterns[i];
	input_patterns[i++] = red_start;
	input_patterns[i++] = red_length;
	input_patterns[i++] = red_duty;
	input_patterns[i++] = red_pause;
	input_patterns[i++] = green_start;
	input_patterns[i++] = green_length;
	input_patterns[i++] = green_duty;
	input_patterns[i++] = green_pause;
	input_patterns[i++] = blue_start;
	input_patterns[i++] = blue_length;
	input_patterns[i++] = blue_duty;
	input_patterns[i++] = blue_pause;
	input_patterns[i++] = red_flag;
	input_patterns[i++] = green_flag;
	input_patterns[i++] = blue_flag;
	input_patterns[i++] = period;

	/* 1. set all leds brightness to 0 */
	red_led->cdev.brightness = 0;
	green_led->cdev.brightness = 0;
	blue_led->cdev.brightness = 0;

	/* 2. run work-function, as brightness 0, all led turn off */
	/* qpnp_rgb_set(red_led); */
	/* qpnp_rgb_set(green_led); */
	/* qpnp_rgb_set(blue_led); */

	/* 3. change LUT structure in platform device. */
	/*
	 * NOTE(review): input_patterns is a stack array, but its address
	 * is stored in the long-lived duty_pcts0 pointers; a later
	 * change_led_pattern(0) would dereference a dangling pointer -
	 * confirm pattern 0 cannot be requested after this call returns.
	 */
	red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0 =
		(int *)&input_patterns;
	green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0 =
		(int *)&input_patterns;
	blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0 =
		(int *)&input_patterns;

	duty_pcts_red = red_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;
	duty_pcts_green = green_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;
	duty_pcts_blue = blue_led->rgb_cfg->pwm_cfg->duty_cycles->duty_pcts0;

	/* 4.
lut disable, so we can edit LUT table after done this. */
	pwm_disable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	/* 5. lut config(red led). */
	/* rgb_lut_params.start_idx = duty_pcts_red[63]+1; */
	rgb_lut_params.start_idx = red_start;
	rgb_lut_params.idx_len = red_length;
	rgb_lut_params.lut_pause_hi = red_pause;
	rgb_lut_params.lut_pause_lo = 0;
	rgb_lut_params.ramp_step_ms = period;
	rgb_lut_params.flags = red_flag;
	pwm_lut_config(red_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[red_start], rgb_lut_params);

	/* 6. lut config(green led). */
	/* rgb_lut_params.start_idx = duty_pcts_red[67]+1; */
	/* pause_lo and ramp_step_ms carry over from the red setup above */
	rgb_lut_params.start_idx = green_start;
	rgb_lut_params.idx_len = green_length;
	rgb_lut_params.lut_pause_hi = green_pause;
	rgb_lut_params.flags = green_flag;
	pwm_lut_config(green_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[green_start], rgb_lut_params);

	/* 7. lut config(blue led). */
	/* rgb_lut_params.start_idx = duty_pcts_blue[71]+1 */
	rgb_lut_params.start_idx = blue_start;
	rgb_lut_params.idx_len = blue_length;
	rgb_lut_params.lut_pause_hi = blue_pause;
	rgb_lut_params.flags = blue_flag;
	pwm_lut_config(blue_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&duty_pcts_red[blue_start], rgb_lut_params);

	/* 8. lut enable, so we can run led after done this. */
	pwm_enable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	/* 9. set leds brightness to 255 */
	red_led->cdev.brightness = 255;
	green_led->cdev.brightness = 255;
	blue_led->cdev.brightness = 255;

	/* a zero duty for a color keeps that LED dark */
	if (red_duty == 0)
		red_led->cdev.brightness = 0;
	if (green_duty == 0)
		green_led->cdev.brightness = 0;
	if (blue_duty == 0)
		blue_led->cdev.brightness = 0;

	/* 10.
run work-function, as brightness 255, all led turn on */
	qpnp_rgb_set(red_led);
	qpnp_rgb_set(green_led);
	qpnp_rgb_set(blue_led);
}

/*
 * make_blink_led_pattern() - blink the RGB LEDs with the 0xRRGGBB
 * color in "rgb"; the on/off delays (ms) are realized through the LUT
 * pause-hi/pause-lo times.
 */
void make_blink_led_pattern(int rgb, int delay_on, int delay_off)
{
	int blink_pattern[6] = {0, 0, 0, 0, 0, 0};
	struct lut_params rgb_lut_params;

	red_led->cdev.brightness = 0;
	green_led->cdev.brightness = 0;
	blue_led->cdev.brightness = 0;

	pwm_disable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	pwm_disable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	rgb_lut_params.idx_len = 2;
	rgb_lut_params.lut_pause_hi = delay_off/2;
	rgb_lut_params.lut_pause_lo = delay_on/2;
	rgb_lut_params.ramp_step_ms = 1;
	/* NOTE(review): 91 is an undocumented LUT flag combination */
	rgb_lut_params.flags = 91;

	/* even slots hold each color's on-duty; odd slots stay 0 (off) */
	blink_pattern[0] = ((rgb >> 16) & 0xFF) * BRIGHTNESS_TUNNING;
	blink_pattern[2] = ((rgb >> 8) & 0xFF) * BRIGHTNESS_TUNNING;
	blink_pattern[4] = (rgb & 0xFF) * BRIGHTNESS_TUNNING;

	rgb_lut_params.start_idx = 0;
	pwm_lut_config(red_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		blink_pattern, rgb_lut_params);
	rgb_lut_params.start_idx = 2;
	pwm_lut_config(green_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&blink_pattern[2], rgb_lut_params);
	rgb_lut_params.start_idx = 4;
	pwm_lut_config(blue_led->rgb_cfg->pwm_cfg->pwm_dev, 200,
		&blink_pattern[4], rgb_lut_params);

	pwm_enable(red_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(green_led->rgb_cfg->pwm_cfg->pwm_dev);
	usleep(100);
	pwm_enable(blue_led->rgb_cfg->pwm_cfg->pwm_dev);

	red_led->cdev.brightness = (rgb >> 16) & 0xFF;
	green_led->cdev.brightness = (rgb >> 8) & 0xFF;
	blue_led->cdev.brightness = rgb & 0xFF;

	printk(KERN_INFO "[RGB LED] brightness R:%d G:%d B:%d\n",
		red_led->cdev.brightness,
		green_led->cdev.brightness,
		blue_led->cdev.brightness);

	qpnp_rgb_set(red_led);
	qpnp_rgb_set(green_led);
	qpnp_rgb_set(blue_led);
}

/*
 * make_onoff_led_pattern() - show a steady 0xRRGGBB color using a
 * flat two-entry LUT per color channel.
 */
void make_onoff_led_pattern(int rgb)
{
	int onoff_pattern[6] = {100, 100, 100, 100, 100, 100};
	struct lut_params rgb_lut_params;

	red_led->cdev.brightness = 0;
	green_led->cdev.brightness = 0;
	blue_led->cdev.brightness = 0;
pwm_disable(red_led->rgb_cfg->pwm_cfg->pwm_dev); pwm_disable(green_led->rgb_cfg->pwm_cfg->pwm_dev); pwm_disable(blue_led->rgb_cfg->pwm_cfg->pwm_dev); rgb_lut_params.idx_len = 2; rgb_lut_params.lut_pause_hi = 0; rgb_lut_params.lut_pause_lo = 0; rgb_lut_params.ramp_step_ms = 0; rgb_lut_params.flags = 65; /* tuning RGB input from framework. PM8921 can use 512 resolution R : (rgb*1.4)/3 G : (rgb*2)/3 B : rgb/3 */ onoff_pattern[0] = ((rgb >> 16) & 0xFF) * BRIGHTNESS_TUNNING; onoff_pattern[2] = ((rgb >> 8) & 0xFF) * BRIGHTNESS_TUNNING; onoff_pattern[4] = (rgb & 0xFF) * BRIGHTNESS_TUNNING; onoff_pattern[1] = onoff_pattern[0]; onoff_pattern[3] = onoff_pattern[2]; onoff_pattern[5] = onoff_pattern[4]; rgb_lut_params.start_idx = 0; pwm_lut_config(red_led->rgb_cfg->pwm_cfg->pwm_dev, 200, onoff_pattern, rgb_lut_params); rgb_lut_params.start_idx = 2; pwm_lut_config(green_led->rgb_cfg->pwm_cfg->pwm_dev, 200, &onoff_pattern[2], rgb_lut_params); rgb_lut_params.start_idx = 4; pwm_lut_config(blue_led->rgb_cfg->pwm_cfg->pwm_dev, 200, &onoff_pattern[4], rgb_lut_params); pwm_enable(red_led->rgb_cfg->pwm_cfg->pwm_dev); usleep(100); pwm_enable(green_led->rgb_cfg->pwm_cfg->pwm_dev); usleep(100); pwm_enable(blue_led->rgb_cfg->pwm_cfg->pwm_dev); red_led->cdev.brightness = (rgb >> 16) & 0xFF; green_led->cdev.brightness = (rgb >> 8) & 0xFF; blue_led->cdev.brightness = rgb & 0xFF; qpnp_rgb_set(red_led); qpnp_rgb_set(green_led); qpnp_rgb_set(blue_led); } static bool check_bootmode(void) { enum lge_boot_mode_type bootmode = LGE_BOOT_MODE_NORMAL; bootmode = lge_get_boot_mode(); if (bootmode == LGE_BOOT_MODE_FACTORY || bootmode == LGE_BOOT_MODE_FACTORY2 || bootmode == LGE_BOOT_MODE_PIFBOOT || bootmode == LGE_BOOT_MODE_PIFBOOT2) { return true; } else { return false; } } /* below function is for aat... 
*/ void rgb_luts_set(struct qpnp_led_data *led) { int rgb_brightness = 0; if(check_bootmode()) { switch(led->id) { case 3: /*R*/ printk(KERN_INFO "[R CUR] %d [NEW] :%d\n",red_led->cdev.brightness, led->cdev.brightness); red_led->cdev.brightness = (led->cdev.brightness == 0) ? 0 : red_led->cdev.brightness; break; case 4: /*G*/ printk(KERN_INFO "[G CUR] %d [NEW] :%d\n",green_led->cdev.brightness, led->cdev.brightness); green_led->cdev.brightness = (led->cdev.brightness == 0) ? 0 : green_led->cdev.brightness; break; case 5: /* B*/ printk(KERN_INFO "[B CUR] %d [NEW] :%d\n",blue_led->cdev.brightness, led->cdev.brightness); blue_led->cdev.brightness = (led->cdev.brightness == 0) ? 0 : blue_led->cdev.brightness; break; default: /* unknown */ break; } } rgb_brightness = (red_led->cdev.brightness << 16) + (green_led->cdev.brightness << 8) + blue_led->cdev.brightness; if (rgb_brightness > 0) make_onoff_led_pattern(rgb_brightness); else make_onoff_led_pattern(0); printk(KERN_INFO "[RGB LED] brightness R:%d G:%d B:%d\n", red_led->cdev.brightness, green_led->cdev.brightness, blue_led->cdev.brightness); } #endif static int __devinit qpnp_get_config_mpp(struct qpnp_led_data *led, struct device_node *node) { int rc; u32 val; u8 led_mode; const char *mode; led->mpp_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct mpp_config_data), GFP_KERNEL); if (!led->mpp_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN; rc = of_property_read_u32(node, "qcom,current-setting", &val); if (!rc) { if (led->mpp_cfg->current_setting < LED_MPP_CURRENT_MIN) led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN; else if (led->mpp_cfg->current_setting > LED_MPP_CURRENT_MAX) led->mpp_cfg->current_setting = LED_MPP_CURRENT_MAX; else led->mpp_cfg->current_setting = (u8) val; } else if (rc != -EINVAL) return rc; led->mpp_cfg->source_sel = LED_MPP_SOURCE_SEL_DEFAULT; rc = of_property_read_u32(node, "qcom,source-sel", 
&val); if (!rc) led->mpp_cfg->source_sel = (u8) val; else if (rc != -EINVAL) return rc; led->mpp_cfg->mode_ctrl = LED_MPP_MODE_SINK; rc = of_property_read_u32(node, "qcom,mode-ctrl", &val); if (!rc) led->mpp_cfg->mode_ctrl = (u8) val; else if (rc != -EINVAL) return rc; led->mpp_cfg->vin_ctrl = LED_MPP_VIN_CTRL_DEFAULT; rc = of_property_read_u32(node, "qcom,vin-ctrl", &val); if (!rc) led->mpp_cfg->vin_ctrl = (u8) val; else if (rc != -EINVAL) return rc; led->mpp_cfg->min_brightness = 0; rc = of_property_read_u32(node, "qcom,min-brightness", &val); if (!rc) led->mpp_cfg->min_brightness = (u8) val; else if (rc != -EINVAL) return rc; rc = of_property_read_string(node, "qcom,mode", &mode); if (!rc) { led_mode = qpnp_led_get_mode(mode); led->mpp_cfg->pwm_mode = led_mode; if (led_mode == MANUAL_MODE) return MANUAL_MODE; else if (led_mode == -EINVAL) { dev_err(&led->spmi_dev->dev, "Selected mode not " \ "supported for mpp.\n"); return -EINVAL; } led->mpp_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev, sizeof(struct pwm_config_data), GFP_KERNEL); if (!led->mpp_cfg->pwm_cfg) { dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n"); return -ENOMEM; } led->mpp_cfg->pwm_cfg->mode = led_mode; led->mpp_cfg->pwm_cfg->default_mode = led_mode; } else return rc; rc = qpnp_get_config_pwm(led->mpp_cfg->pwm_cfg, led->spmi_dev, node); if (rc < 0) return rc; return 0; } static int __devinit qpnp_leds_probe(struct spmi_device *spmi) { struct qpnp_led_data *led, *led_array; struct resource *led_resource; struct device_node *node, *temp; int rc, i, num_leds = 0, parsed_leds = 0; const char *led_label; bool regulator_probe = false; node = spmi->dev.of_node; if (node == NULL) return -ENODEV; temp = NULL; while ((temp = of_get_next_child(node, temp))) num_leds++; if (!num_leds) return -ECHILD; led_array = devm_kzalloc(&spmi->dev, (sizeof(struct qpnp_led_data) * num_leds), GFP_KERNEL); if (!led_array) { dev_err(&spmi->dev, "Unable to allocate memory\n"); return -ENOMEM; } 
for_each_child_of_node(node, temp) { led = &led_array[parsed_leds]; led->num_leds = num_leds; led->spmi_dev = spmi; led_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0); if (!led_resource) { dev_err(&spmi->dev, "Unable to get LED base address\n"); rc = -ENXIO; goto fail_id_check; } led->base = led_resource->start; rc = of_property_read_string(temp, "label", &led_label); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Failure reading label, rc = %d\n", rc); goto fail_id_check; } rc = of_property_read_string(temp, "linux,name", &led->cdev.name); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Failure reading led name, rc = %d\n", rc); goto fail_id_check; } rc = of_property_read_u32(temp, "qcom,max-current", &led->max_current); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Failure reading max_current, rc = %d\n", rc); goto fail_id_check; } rc = of_property_read_u32(temp, "qcom,id", &led->id); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Failure reading led id, rc = %d\n", rc); goto fail_id_check; } rc = qpnp_get_common_configs(led, temp); if (rc) { dev_err(&led->spmi_dev->dev, "Failure reading common led configuration," \ " rc = %d\n", rc); goto fail_id_check; } led->cdev.brightness_set = qpnp_led_set; #ifdef CONFIG_LGE_DUAL_LED led->cdev.brightness_set2 = qpnp_led_set2; #endif led->cdev.brightness_get = qpnp_led_get; if (strncmp(led_label, "wled", sizeof("wled")) == 0) { rc = qpnp_get_config_wled(led, temp); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Unable to read wled config data\n"); goto fail_id_check; } } else if (strncmp(led_label, "flash", sizeof("flash")) == 0) { if (!of_find_property(node, "flash-boost-supply", NULL)) regulator_probe = true; #ifndef CONFIG_QPNP_CHARGER regulator_probe = true; #endif rc = qpnp_get_config_flash(led, temp, &regulator_probe); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Unable to read flash config data\n"); goto fail_id_check; } } else if (strncmp(led_label, "rgb", sizeof("rgb")) == 0) { rc = qpnp_get_config_rgb(led, temp); if 
(rc < 0) { dev_err(&led->spmi_dev->dev, "Unable to read rgb config data\n"); goto fail_id_check; } } else if (strncmp(led_label, "mpp", sizeof("mpp")) == 0) { rc = qpnp_get_config_mpp(led, temp); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Unable to read mpp config data\n"); goto fail_id_check; } } else if (strncmp(led_label, "kpdbl", sizeof("kpdbl")) == 0) { bitmap_zero(kpdbl_leds_in_use, NUM_KPDBL_LEDS); is_kpdbl_master_turn_on = false; rc = qpnp_get_config_kpdbl(led, temp); if (rc < 0) { dev_err(&led->spmi_dev->dev, "Unable to read kpdbl config data\n"); goto fail_id_check; } } else { dev_err(&led->spmi_dev->dev, "No LED matching label\n"); rc = -EINVAL; goto fail_id_check; } #ifdef CONFIG_MACH_LGE mutex_init(&led_sequence_lock); #endif if (led->id != QPNP_ID_FLASH1_LED0 && led->id != QPNP_ID_FLASH1_LED1) mutex_init(&led->lock); led->in_order_command_processing = of_property_read_bool (temp, "qcom,in-order-command-processing"); if (led->in_order_command_processing) { /* * the command order from user space needs to be * maintained use ordered workqueue to prevent * concurrency */ led->workqueue = alloc_ordered_workqueue ("led_workqueue", 0); if (!led->workqueue) { rc = -ENOMEM; goto fail_id_check; } } INIT_WORK(&led->work, qpnp_led_work); rc = qpnp_led_initialize(led); if (rc < 0) goto fail_id_check; rc = qpnp_led_set_max_brightness(led); if (rc < 0) goto fail_id_check; rc = led_classdev_register(&spmi->dev, &led->cdev); if (rc) { dev_err(&spmi->dev, "unable to register led %d,rc=%d\n", led->id, rc); goto fail_id_check; } if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1) { rc = sysfs_create_group(&led->cdev.dev->kobj, &led_attr_group); if (rc) goto fail_id_check; } if (led->id == QPNP_ID_LED_MPP) { if (!led->mpp_cfg->pwm_cfg) break; if (led->mpp_cfg->pwm_cfg->mode == PWM_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &pwm_attr_group); if (rc) goto fail_id_check; } if (led->mpp_cfg->pwm_cfg->use_blink) { rc = 
sysfs_create_group(&led->cdev.dev->kobj, &blink_attr_group); if (rc) goto fail_id_check; rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } else if (led->mpp_cfg->pwm_cfg->mode == LPG_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } } else if ((led->id == QPNP_ID_RGB_RED) || (led->id == QPNP_ID_RGB_GREEN) || (led->id == QPNP_ID_RGB_BLUE)) { if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &pwm_attr_group); if (rc) goto fail_id_check; } if (led->rgb_cfg->pwm_cfg->use_blink) { rc = sysfs_create_group(&led->cdev.dev->kobj, &blink_attr_group); if (rc) goto fail_id_check; rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } else if (led->rgb_cfg->pwm_cfg->mode == LPG_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } } else if (led->id == QPNP_ID_KPDBL) { if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &pwm_attr_group); if (rc) goto fail_id_check; } if (led->kpdbl_cfg->pwm_cfg->use_blink) { rc = sysfs_create_group(&led->cdev.dev->kobj, &blink_attr_group); if (rc) goto fail_id_check; rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } else if (led->kpdbl_cfg->pwm_cfg->mode == LPG_MODE) { rc = sysfs_create_group(&led->cdev.dev->kobj, &lpg_attr_group); if (rc) goto fail_id_check; } } /* configure default state */ if (led->default_on) { led->cdev.brightness = led->cdev.max_brightness; __qpnp_led_work(led, led->cdev.brightness); if (led->turn_off_delay_ms > 0) qpnp_led_turn_off(led); } else led->cdev.brightness = LED_OFF; parsed_leds++; } dev_set_drvdata(&spmi->dev, led_array); return 0; fail_id_check: for (i = 0; i < parsed_leds; i++) { if (led_array[i].id != QPNP_ID_FLASH1_LED0 && led_array[i].id != QPNP_ID_FLASH1_LED1) mutex_destroy(&led_array[i].lock); if 
(led_array[i].in_order_command_processing) destroy_workqueue(led_array[i].workqueue); led_classdev_unregister(&led_array[i].cdev); } return rc; } static int __devexit qpnp_leds_remove(struct spmi_device *spmi) { struct qpnp_led_data *led_array = dev_get_drvdata(&spmi->dev); int i, parsed_leds = led_array->num_leds; for (i = 0; i < parsed_leds; i++) { cancel_work_sync(&led_array[i].work); if (led_array[i].id != QPNP_ID_FLASH1_LED0 && led_array[i].id != QPNP_ID_FLASH1_LED1) mutex_destroy(&led_array[i].lock); if (led_array[i].in_order_command_processing) destroy_workqueue(led_array[i].workqueue); led_classdev_unregister(&led_array[i].cdev); switch (led_array[i].id) { case QPNP_ID_WLED: break; case QPNP_ID_FLASH1_LED0: case QPNP_ID_FLASH1_LED1: if (led_array[i].flash_cfg->flash_reg_get) regulator_put(led_array[i].flash_cfg-> \ flash_boost_reg); if (led_array[i].flash_cfg->torch_enable) regulator_put(led_array[i].flash_cfg->\ torch_boost_reg); sysfs_remove_group(&led_array[i].cdev.dev->kobj, &led_attr_group); break; case QPNP_ID_RGB_RED: case QPNP_ID_RGB_GREEN: case QPNP_ID_RGB_BLUE: if (led_array[i].rgb_cfg->pwm_cfg->mode == PWM_MODE) sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &pwm_attr_group); if (led_array[i].rgb_cfg->pwm_cfg->use_blink) { sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &blink_attr_group); sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &lpg_attr_group); } else if (led_array[i].rgb_cfg->pwm_cfg->mode\ == LPG_MODE) sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &lpg_attr_group); break; case QPNP_ID_LED_MPP: if (!led_array[i].mpp_cfg->pwm_cfg) break; if (led_array[i].mpp_cfg->pwm_cfg->mode == PWM_MODE) sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &pwm_attr_group); if (led_array[i].mpp_cfg->pwm_cfg->use_blink) { sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &blink_attr_group); sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &lpg_attr_group); } else if (led_array[i].mpp_cfg->pwm_cfg->mode\ == LPG_MODE) 
sysfs_remove_group(&led_array[i].cdev.dev->\ kobj, &lpg_attr_group); break; case QPNP_ID_KPDBL: if (led_array[i].kpdbl_cfg->pwm_cfg->mode == PWM_MODE) sysfs_remove_group(&led_array[i].cdev.dev-> kobj, &pwm_attr_group); if (led_array[i].kpdbl_cfg->pwm_cfg->use_blink) { sysfs_remove_group(&led_array[i].cdev.dev-> kobj, &blink_attr_group); sysfs_remove_group(&led_array[i].cdev.dev-> kobj, &lpg_attr_group); } else if (led_array[i].kpdbl_cfg->pwm_cfg->mode == LPG_MODE) sysfs_remove_group(&led_array[i].cdev.dev-> kobj, &lpg_attr_group); break; default: dev_err(&led_array[i].spmi_dev->dev, "Invalid LED(%d)\n", led_array[i].id); return -EINVAL; } } return 0; } #ifdef CONFIG_OF static struct of_device_id spmi_match_table[] = { { .compatible = "qcom,leds-qpnp",}, { }, }; #else #define spmi_match_table NULL #endif static struct spmi_driver qpnp_leds_driver = { .driver = { .name = "qcom,leds-qpnp", .of_match_table = spmi_match_table, }, .probe = qpnp_leds_probe, .remove = __devexit_p(qpnp_leds_remove), }; static int __init qpnp_led_init(void) { return spmi_driver_register(&qpnp_leds_driver); } module_init(qpnp_led_init); static void __exit qpnp_led_exit(void) { spmi_driver_unregister(&qpnp_leds_driver); } module_exit(qpnp_led_exit); MODULE_DESCRIPTION("QPNP LEDs driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("leds:leds-qpnp");
gpl-2.0
thebohemian/android-buglabs-kernel
net/netrom/af_netrom.c
22
33697
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk) */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/netrom.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/ip.h> #include <net/tcp_states.h> #include <net/arp.h> #include <linux/init.h> static int nr_ndevs = 4; int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL; int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS; int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL; int sysctl_netrom_transport_timeout = NR_DEFAULT_T1; int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2; int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2; int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4; int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW; int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE; int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING; int 
sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS; int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET; static unsigned short circuit = 0x101; static HLIST_HEAD(nr_list); static DEFINE_SPINLOCK(nr_list_lock); static const struct proto_ops nr_proto_ops; /* * NETROM network devices are virtual network devices encapsulating NETROM * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key nr_netdev_xmit_lock_key; static struct lock_class_key nr_netdev_addr_lock_key; static void nr_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); } static void nr_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); } /* * Socket removal during an interrupt is now safe. */ static void nr_remove_socket(struct sock *sk) { spin_lock_bh(&nr_list_lock); sk_del_node_init(sk); spin_unlock_bh(&nr_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void nr_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; spin_lock_bh(&nr_list_lock); sk_for_each(s, node, &nr_list) if (nr_sk(s)->device == dev) nr_disconnect(s, ENETUNREACH); spin_unlock_bh(&nr_list_lock); } /* * Handle device status changes. */ static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; nr_kill_by_device(dev); nr_rt_device_down(dev); return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/ static void nr_insert_socket(struct sock *sk) { spin_lock_bh(&nr_list_lock); sk_add_node(sk, &nr_list); spin_unlock_bh(&nr_list_lock); } /* * Find a socket that wants to accept the Connect Request we just * received. */ static struct sock *nr_find_listener(ax25_address *addr) { struct sock *s; struct hlist_node *node; spin_lock_bh(&nr_list_lock); sk_for_each(s, node, &nr_list) if (!ax25cmp(&nr_sk(s)->source_addr, addr) && s->sk_state == TCP_LISTEN) { bh_lock_sock(s); goto found; } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find a connected NET/ROM socket given my circuit IDs. */ static struct sock *nr_find_socket(unsigned char index, unsigned char id) { struct sock *s; struct hlist_node *node; spin_lock_bh(&nr_list_lock); sk_for_each(s, node, &nr_list) { struct nr_sock *nr = nr_sk(s); if (nr->my_index == index && nr->my_id == id) { bh_lock_sock(s); goto found; } } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find a connected NET/ROM socket given their circuit IDs. */ static struct sock *nr_find_peer(unsigned char index, unsigned char id, ax25_address *dest) { struct sock *s; struct hlist_node *node; spin_lock_bh(&nr_list_lock); sk_for_each(s, node, &nr_list) { struct nr_sock *nr = nr_sk(s); if (nr->your_index == index && nr->your_id == id && !ax25cmp(&nr->dest_addr, dest)) { bh_lock_sock(s); goto found; } } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find next free circuit ID. */ static unsigned short nr_find_next_circuit(void) { unsigned short id = circuit; unsigned char i, j; struct sock *sk; for (;;) { i = id / 256; j = id % 256; if (i != 0 && j != 0) { if ((sk=nr_find_socket(i, j)) == NULL) break; bh_unlock_sock(sk); } id++; } return id; } /* * Deferred destroy. */ void nr_destroy_socket(struct sock *); /* * Handler for deferred kills. 
*/ static void nr_destroy_timer(unsigned long data) { struct sock *sk=(struct sock *)data; bh_lock_sock(sk); sock_hold(sk); nr_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void nr_destroy_socket(struct sock *sk) { struct sk_buff *skb; nr_remove_socket(sk); nr_stop_heartbeat(sk); nr_stop_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); nr_start_heartbeat(skb->sk); nr_sk(skb->sk)->state = NR_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.function = nr_destroy_timer; sk->sk_timer.expires = jiffies + 2 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * NET/ROM socket object. 
*/ static int nr_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int opt; if (level != SOL_NETROM) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case NETROM_T1: if (opt < 1) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: if (opt < 1) return -EINVAL; nr->t2 = opt * HZ; return 0; case NETROM_N2: if (opt < 1 || opt > 31) return -EINVAL; nr->n2 = opt; return 0; case NETROM_T4: if (opt < 1) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: if (opt < 0) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; default: return -ENOPROTOOPT; } } static int nr_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int val = 0; int len; if (level != SOL_NETROM) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case NETROM_T1: val = nr->t1 / HZ; break; case NETROM_T2: val = nr->t2 / HZ; break; case NETROM_N2: val = nr->n2; break; case NETROM_T4: val = nr->t4 / HZ; break; case NETROM_IDLE: val = nr->idle / (60 * HZ); break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? 
-EFAULT : 0; } static int nr_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; release_sock(sk); return 0; } release_sock(sk); return -EOPNOTSUPP; } static struct proto nr_proto = { .name = "NETROM", .owner = THIS_MODULE, .obj_size = sizeof(struct nr_sock), }; static int nr_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct nr_sock *nr; if (net != &init_net) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto); if (sk == NULL) return -ENOMEM; nr = nr_sk(sk); sock_init_data(sock, sk); sock->ops = &nr_proto_ops; sk->sk_protocol = protocol; skb_queue_head_init(&nr->ack_queue); skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); nr->t1 = msecs_to_jiffies(sysctl_netrom_transport_timeout); nr->t2 = msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay); nr->n2 = msecs_to_jiffies(sysctl_netrom_transport_maximum_tries); nr->t4 = msecs_to_jiffies(sysctl_netrom_transport_busy_delay); nr->idle = msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout); nr->window = sysctl_netrom_transport_requested_window_size; nr->bpqext = 1; nr->state = NR_STATE_0; return 0; } static struct sock *nr_make_new(struct sock *osk) { struct sock *sk; struct nr_sock *nr, *onr; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot); if (sk == NULL) return NULL; nr = nr_sk(sk); sock_init_data(NULL, sk); sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); skb_queue_head_init(&nr->ack_queue); 
skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); onr = nr_sk(osk); nr->t1 = onr->t1; nr->t2 = onr->t2; nr->n2 = onr->n2; nr->t4 = onr->t4; nr->idle = onr->idle; nr->window = onr->window; nr->device = onr->device; nr->bpqext = onr->bpqext; return sk; } static int nr_release(struct socket *sock) { struct sock *sk = sock->sk; struct nr_sock *nr; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); nr = nr_sk(sk); switch (nr->state) { case NR_STATE_0: case NR_STATE_1: case NR_STATE_2: nr_disconnect(sk, 0); nr_destroy_socket(sk); break; case NR_STATE_3: nr_clear_queues(sk); nr->n2count = 0; nr_write_internal(sk, NR_DISCREQ); nr_start_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr->state = NR_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr; struct net_device *dev; ax25_uid_assoc *user; ax25_address *source; lock_sock(sk); if (!sock_flag(sk, SOCK_ZAPPED)) { release_sock(sk); return -EINVAL; } if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) { release_sock(sk); return -EINVAL; } if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) { release_sock(sk); return -EINVAL; } if (addr->fsa_ax25.sax25_family != AF_NETROM) { release_sock(sk); return -EINVAL; } if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { SOCK_DEBUG(sk, "NET/ROM: bind failed: invalid node callsign\n"); release_sock(sk); return -EADDRNOTAVAIL; } /* * Only the super user can set an arbitrary user callsign. 
*/ if (addr->fsa_ax25.sax25_ndigis == 1) { if (!capable(CAP_NET_BIND_SERVICE)) { dev_put(dev); release_sock(sk); return -EACCES; } nr->user_addr = addr->fsa_digipeater[0]; nr->source_addr = addr->fsa_ax25.sax25_call; } else { source = &addr->fsa_ax25.sax25_call; user = ax25_findbyuid(current_euid()); if (user) { nr->user_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { release_sock(sk); dev_put(dev); return -EPERM; } nr->user_addr = *source; } nr->source_addr = *source; } nr->device = dev; nr_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); dev_put(dev); release_sock(sk); SOCK_DEBUG(sk, "NET/ROM: socket is bound\n"); return 0; } static int nr_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr; ax25_address *source = NULL; ax25_uid_assoc *user; struct net_device *dev; int err = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out_release; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { err = -EISCONN; /* No reconnect on a seqpacket socket */ goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) { err = -EINVAL; goto out_release; } if (addr->sax25_family != AF_NETROM) { err = -EINVAL; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = nr_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } source = (ax25_address *)dev->dev_addr; user = ax25_findbyuid(current_euid()); if (user) { 
nr->user_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) { dev_put(dev); err = -EPERM; goto out_release; } nr->user_addr = *source; } nr->source_addr = *source; nr->device = dev; dev_put(dev); nr_insert_socket(sk); /* Finish the bind */ } nr->dest_addr = addr->sax25_call; release_sock(sk); circuit = nr_find_next_circuit(); lock_sock(sk); nr->my_index = circuit / 256; nr->my_id = circuit % 256; circuit++; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; nr_establish_data_link(sk); nr->state = NR_STATE_1; nr_start_heartbeat(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk->sk_sleep, &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int nr_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); skb = 
skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk->sk_sleep, &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ kfree_skb(skb); sk_acceptq_removed(sk); out_release: release_sock(sk); return err; } static int nr_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr; struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); lock_sock(sk); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 1; sax->fsa_ax25.sax25_call = nr->user_addr; memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater)); sax->fsa_digipeater[0] = nr->dest_addr; *uaddr_len = sizeof(struct full_sockaddr_ax25); } else { sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 0; sax->fsa_ax25.sax25_call = nr->source_addr; *uaddr_len = sizeof(struct sockaddr_ax25); } release_sock(sk); return 0; } int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) { struct sock *sk; struct sock *make; struct nr_sock *nr_make; ax25_address *src, *dest, *user; unsigned short circuit_index, circuit_id; unsigned short peer_circuit_index, peer_circuit_id; unsigned short frametype, flags, window, timeout; int ret; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the netrom frame start */ src = (ax25_address *)(skb->data + 0); dest = (ax25_address *)(skb->data + 7); circuit_index = skb->data[15]; circuit_id = skb->data[16]; peer_circuit_index = skb->data[17]; peer_circuit_id = skb->data[18]; frametype = skb->data[19] & 0x0F; flags = skb->data[19] & 0xF0; /* * Check for an incoming IP over NET/ROM frame. 
*/ if (frametype == NR_PROTOEXT && circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); skb_reset_transport_header(skb); return nr_rx_ip(skb, dev); } /* * Find an existing socket connection, based on circuit ID, if it's * a Connect Request base it on their circuit ID. * * Circuit ID 0/0 is not valid but it could still be a "reset" for a * circuit that no longer exists at the other end ... */ sk = NULL; if (circuit_index == 0 && circuit_id == 0) { if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG) sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src); } else { if (frametype == NR_CONNREQ) sk = nr_find_peer(circuit_index, circuit_id, src); else sk = nr_find_socket(circuit_index, circuit_id); } if (sk != NULL) { skb_reset_transport_header(skb); if (frametype == NR_CONNACK && skb->len == 22) nr_sk(sk)->bpqext = 1; else nr_sk(sk)->bpqext = 0; ret = nr_process_rx_frame(sk, skb); bh_unlock_sock(sk); return ret; } /* * Now it should be a CONNREQ. */ if (frametype != NR_CONNREQ) { /* * Here it would be nice to be able to send a reset but * NET/ROM doesn't have one. We've tried to extend the protocol * by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that * apparently kills BPQ boxes... :-( * So now we try to follow the established behaviour of * G8PZT's Xrouter which is sending packets with command type 7 * as an extension of the protocol. 
*/ if (sysctl_netrom_reset_circuit && (frametype != NR_RESET || flags != 0)) nr_transmit_reset(skb, 1); return 0; } sk = nr_find_listener(dest); user = (ax25_address *)(skb->data + 21); if (sk == NULL || sk_acceptq_is_full(sk) || (make = nr_make_new(sk)) == NULL) { nr_transmit_refusal(skb, 0); if (sk) bh_unlock_sock(sk); return 0; } window = skb->data[20]; skb->sk = make; make->sk_state = TCP_ESTABLISHED; /* Fill in his circuit details */ nr_make = nr_sk(make); nr_make->source_addr = *dest; nr_make->dest_addr = *src; nr_make->user_addr = *user; nr_make->your_index = circuit_index; nr_make->your_id = circuit_id; bh_unlock_sock(sk); circuit = nr_find_next_circuit(); bh_lock_sock(sk); nr_make->my_index = circuit / 256; nr_make->my_id = circuit % 256; circuit++; /* Window negotiation */ if (window < nr_make->window) nr_make->window = window; /* L4 timeout negotiation */ if (skb->len == 37) { timeout = skb->data[36] * 256 + skb->data[35]; if (timeout * HZ < nr_make->t1) nr_make->t1 = timeout * HZ; nr_make->bpqext = 1; } else { nr_make->bpqext = 0; } nr_write_internal(make, NR_CONNACK); nr_make->condition = 0x00; nr_make->vs = 0; nr_make->va = 0; nr_make->vr = 0; nr_make->vl = 0; nr_make->state = NR_STATE_3; sk_acceptq_added(sk); skb_queue_head(&sk->sk_receive_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); bh_unlock_sock(sk); nr_insert_socket(make); nr_start_heartbeat(make); nr_start_idletimer(make); return 1; } static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name; int err; struct sockaddr_ax25 sax; struct sk_buff *skb; unsigned char *asmptr; int size; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, 
current, 0); err = -EPIPE; goto out; } if (nr->device == NULL) { err = -ENETUNREACH; goto out; } if (usax) { if (msg->msg_namelen < sizeof(sax)) { err = -EINVAL; goto out; } sax = *usax; if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) { err = -EISCONN; goto out; } if (sax.sax25_family != AF_NETROM) { err = -EINVAL; goto out; } } else { if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } sax.sax25_family = AF_NETROM; sax.sax25_call = nr->dest_addr; } SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); /* Build a packet - the conventional user limit is 236 bytes. We can do ludicrously large NetROM frames but must not overflow */ if (len > 65536) { err = -EMSGSIZE; goto out; } SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) goto out; skb_reserve(skb, size - len); skb_reset_transport_header(skb); /* * Push down the NET/ROM header */ asmptr = skb_push(skb, NR_TRANSPORT_LEN); SOCK_DEBUG(sk, "Building NET/ROM Header.\n"); /* Build a NET/ROM Transport header */ *asmptr++ = nr->your_index; *asmptr++ = nr->your_id; *asmptr++ = 0; /* To be filled in later */ *asmptr++ = 0; /* Ditto */ *asmptr++ = NR_INFO; SOCK_DEBUG(sk, "Built header.\n"); /* * Put the data on the end */ skb_put(skb, len); SOCK_DEBUG(sk, "NET/ROM: Appending user data\n"); /* User data follows immediately after the NET/ROM transport header */ if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { kfree_skb(skb); err = -EFAULT; goto out; } SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n"); if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); err = -ENOTCONN; goto out; } nr_output(sk, skb); /* Shove it onto the queue */ err = len; out: release_sock(sk); return err; } static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_ax25 *sax = 
(struct sockaddr_ax25 *)msg->msg_name; size_t copied; struct sk_buff *skb; int er; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) { release_sock(sk); return er; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (sax != NULL) { sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); } msg->msg_namelen = sizeof(*sax); skb_free_datagram(sk, skb); release_sock(sk); return copied; } static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *)arg; int ret; switch (cmd) { case TIOCOUTQ: { long amount; lock_sock(sk); amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; release_sock(sk); return put_user(amount, (int __user *)argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; lock_sock(sk); /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); return put_user(amount, (int __user *)argp); } case SIOCGSTAMP: lock_sock(sk); ret = sock_get_timestamp(sk, argp); release_sock(sk); return ret; case SIOCGSTAMPNS: lock_sock(sk); ret = sock_get_timestampns(sk, argp); release_sock(sk); return ret; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCNRDECOBS: if (!capable(CAP_NET_ADMIN)) 
return -EPERM; return nr_rt_ioctl(cmd, argp); default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *nr_info_start(struct seq_file *seq, loff_t *pos) { struct sock *s; struct hlist_node *node; int i = 1; spin_lock_bh(&nr_list_lock); if (*pos == 0) return SEQ_START_TOKEN; sk_for_each(s, node, &nr_list) { if (i == *pos) return s; ++i; } return NULL; } static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return (v == SEQ_START_TOKEN) ? sk_head(&nr_list) : sk_next((struct sock *)v); } static void nr_info_stop(struct seq_file *seq, void *v) { spin_unlock_bh(&nr_list_lock); } static int nr_info_show(struct seq_file *seq, void *v) { struct sock *s = v; struct net_device *dev; struct nr_sock *nr; const char *devname; char buf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n"); else { bh_lock_sock(s); nr = nr_sk(s); if ((dev = nr->device) == NULL) devname = "???"; else devname = dev->name; seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr)); seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr)); seq_printf(seq, "%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n", ax2asc(buf, &nr->source_addr), devname, nr->my_index, nr->my_id, nr->your_index, nr->your_id, nr->state, nr->vs, nr->vr, nr->va, ax25_display_timer(&nr->t1timer) / HZ, nr->t1 / HZ, ax25_display_timer(&nr->t2timer) / HZ, nr->t2 / HZ, ax25_display_timer(&nr->t4timer) / HZ, nr->t4 / HZ, ax25_display_timer(&nr->idletimer) / (60 * HZ), nr->idle / (60 * HZ), nr->n2count, nr->n2, nr->window, sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); bh_unlock_sock(s); } return 0; } static const struct seq_operations nr_info_seqops = { .start = nr_info_start, .next = nr_info_next, .stop = nr_info_stop, .show = nr_info_show, }; static int nr_info_open(struct inode *inode, struct file *file) { return seq_open(file, &nr_info_seqops); } static const struct file_operations nr_info_fops = { .owner = THIS_MODULE, .open = nr_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static struct net_proto_family nr_family_ops = { .family = PF_NETROM, .create = nr_create, .owner = THIS_MODULE, }; static const struct proto_ops nr_proto_ops = { .family = PF_NETROM, .owner = THIS_MODULE, .release = nr_release, .bind = nr_bind, .connect = nr_connect, .socketpair = sock_no_socketpair, .accept = nr_accept, .getname = nr_getname, .poll = datagram_poll, .ioctl = nr_ioctl, .listen = nr_listen, .shutdown = sock_no_shutdown, .setsockopt = nr_setsockopt, .getsockopt = nr_getsockopt, .sendmsg = nr_sendmsg, .recvmsg = nr_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block nr_dev_notifier = { .notifier_call = nr_device_event, }; static struct net_device **dev_nr; static struct ax25_protocol nr_pid = { .pid = AX25_P_NETROM, .func = nr_route_frame }; static struct ax25_linkfail nr_linkfail_notifier = { .func = nr_link_failed, }; static int __init nr_proto_init(void) { int i; int rc = proto_register(&nr_proto, 0); if (rc != 0) goto out; if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); return -1; } dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_nr == NULL) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); return -1; } for (i = 0; i < nr_ndevs; i++) { char name[IFNAMSIZ]; struct net_device *dev; sprintf(name, "nr%d", i); dev = alloc_netdev(0, name, nr_setup); if (!dev) { 
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); goto fail; } dev->base_addr = i; if (register_netdev(dev)) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); free_netdev(dev); goto fail; } nr_set_lockdep_key(dev); dev_nr[i] = dev; } if (sock_register(&nr_family_ops)) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n"); goto fail; } register_netdevice_notifier(&nr_dev_notifier); ax25_register_pid(&nr_pid); ax25_linkfail_register(&nr_linkfail_notifier); #ifdef CONFIG_SYSCTL nr_register_sysctl(); #endif nr_loopback_init(); proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops); proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops); proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_nr[i]); free_netdev(dev_nr[i]); } kfree(dev_nr); proto_unregister(&nr_proto); rc = -1; goto out; } module_init(nr_proto_init); module_param(nr_ndevs, int, 0); MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_NETROM); static void __exit nr_exit(void) { int i; proc_net_remove(&init_net, "nr"); proc_net_remove(&init_net, "nr_neigh"); proc_net_remove(&init_net, "nr_nodes"); nr_loopback_clear(); nr_rt_free(); #ifdef CONFIG_SYSCTL nr_unregister_sysctl(); #endif ax25_linkfail_release(&nr_linkfail_notifier); ax25_protocol_release(AX25_P_NETROM); unregister_netdevice_notifier(&nr_dev_notifier); sock_unregister(PF_NETROM); for (i = 0; i < nr_ndevs; i++) { struct net_device *dev = dev_nr[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_nr); proto_unregister(&nr_proto); } module_exit(nr_exit);
gpl-2.0
mvcsantos/QGIS
src/app/qgsdecorationscalebardialog.cpp
22
2980
/*************************************************************************** * Copyright (C) 2003 by Tim Sutton * * tim@linfiniti.com * * * * This is a plugin generated from the QGIS plugin template * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * ***************************************************************************/ #include "qgsdecorationscalebardialog.h" #include "qgsdecorationscalebar.h" #include "qgslogger.h" #include "qgscontexthelp.h" #include <QColorDialog> #include <QSettings> QgsDecorationScaleBarDialog::QgsDecorationScaleBarDialog( QgsDecorationScaleBar& deco, int units, QWidget* parent ) : QDialog( parent ), mDeco( deco ) { setupUi( this ); QSettings settings; restoreGeometry( settings.value( "/Windows/DecorationScaleBar/geometry" ).toByteArray() ); // set the map units in the spin box switch ( units ) { case 0: spnSize->setSuffix( tr( " metres/km" ) ); break; case 1: spnSize->setSuffix( tr( " feet/miles" ) ); break; case 2: spnSize->setSuffix( tr( " degrees" ) ); break; default: QgsDebugMsg( QString( "Error: not picked up map units - actual value = %1" ).arg( units ) ); } spnSize->setValue( mDeco.mPreferredSize ); chkSnapping->setChecked( mDeco.mSnapping ); cboPlacement->clear(); cboPlacement->addItems( mDeco.mPlacementLabels ); cboPlacement->setCurrentIndex( mDeco.mPlacementIndex ); chkEnable->setChecked( mDeco.enabled() ); cboStyle->clear(); cboStyle->addItems( mDeco.mStyleLabels ); cboStyle->setCurrentIndex( mDeco.mStyleIndex ); pbnChangeColor->setColor( mDeco.mColor ); pbnChangeColor->setContext( "gui" ); pbnChangeColor->setColorDialogTitle( tr( "Select scalebar color" ) ); } QgsDecorationScaleBarDialog::~QgsDecorationScaleBarDialog() { QSettings settings; settings.setValue( "/Windows/DecorationScaleBar/geometry", saveGeometry() ); } 
void QgsDecorationScaleBarDialog::on_buttonBox_helpRequested() { QgsContextHelp::run( metaObject()->className() ); } void QgsDecorationScaleBarDialog::on_buttonBox_accepted() { mDeco.mPlacementIndex = cboPlacement->currentIndex(); mDeco.mPreferredSize = spnSize->value(); mDeco.mSnapping = chkSnapping->isChecked(); mDeco.setEnabled( chkEnable->isChecked() ); mDeco.mStyleIndex = cboStyle->currentIndex(); mDeco.mColor = pbnChangeColor->color(); accept(); } void QgsDecorationScaleBarDialog::on_buttonBox_rejected() { reject(); }
gpl-2.0
mur47x111/JDK8-concurrent-tagging
src/share/vm/utilities/taskqueue.cpp
22
9099
/* * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
* */ #include "precompiled.hpp" #include "oops/oop.inline.hpp" #include "runtime/os.hpp" #include "runtime/thread.inline.hpp" #include "utilities/debug.hpp" #include "utilities/stack.inline.hpp" #include "utilities/taskqueue.hpp" #ifdef TRACESPINNING uint ParallelTaskTerminator::_total_yields = 0; uint ParallelTaskTerminator::_total_spins = 0; uint ParallelTaskTerminator::_total_peeks = 0; #endif #if TASKQUEUE_STATS const char * const TaskQueueStats::_names[last_stat_id] = { "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax" }; TaskQueueStats & TaskQueueStats::operator +=(const TaskQueueStats & addend) { for (unsigned int i = 0; i < last_stat_id; ++i) { _stats[i] += addend._stats[i]; } return *this; } void TaskQueueStats::print_header(unsigned int line, outputStream* const stream, unsigned int width) { // Use a width w: 1 <= w <= max_width const unsigned int max_width = 40; const unsigned int w = MAX2(MIN2(width, max_width), 1U); if (line == 0) { // spaces equal in width to the header const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1; stream->print("%*s", hdr_width, " "); } else if (line == 1) { // labels stream->print("%*s", w, _names[0]); for (unsigned int i = 1; i < last_stat_id; ++i) { stream->print(" %*s", w, _names[i]); } } else if (line == 2) { // dashed lines char dashes[max_width + 1]; memset(dashes, '-', w); dashes[w] = '\0'; stream->print("%s", dashes); for (unsigned int i = 1; i < last_stat_id; ++i) { stream->print(" %s", dashes); } } } void TaskQueueStats::print(outputStream* stream, unsigned int width) const { #define FMT SIZE_FORMAT_W(*) stream->print(FMT, width, _stats[0]); for (unsigned int i = 1; i < last_stat_id; ++i) { stream->print(" " FMT, width, _stats[i]); } #undef FMT } #ifdef ASSERT // Invariants which should hold after a TaskQueue has been emptied and is // quiescent; they do not hold at arbitrary times. 
void TaskQueueStats::verify() const { assert(get(push) == get(pop) + get(steal), err_msg("push=" SIZE_FORMAT " pop=" SIZE_FORMAT " steal=" SIZE_FORMAT, get(push), get(pop), get(steal))); assert(get(pop_slow) <= get(pop), err_msg("pop_slow=" SIZE_FORMAT " pop=" SIZE_FORMAT, get(pop_slow), get(pop))); assert(get(steal) <= get(steal_attempt), err_msg("steal=" SIZE_FORMAT " steal_attempt=" SIZE_FORMAT, get(steal), get(steal_attempt))); assert(get(overflow) == 0 || get(push) != 0, err_msg("overflow=" SIZE_FORMAT " push=" SIZE_FORMAT, get(overflow), get(push))); assert(get(overflow_max_len) == 0 || get(overflow) != 0, err_msg("overflow_max_len=" SIZE_FORMAT " overflow=" SIZE_FORMAT, get(overflow_max_len), get(overflow))); } #endif // ASSERT #endif // TASKQUEUE_STATS int TaskQueueSetSuper::randomParkAndMiller(int *seed0) { const int a = 16807; const int m = 2147483647; const int q = 127773; /* m div a */ const int r = 2836; /* m mod a */ assert(sizeof(int) == 4, "I think this relies on that"); int seed = *seed0; int hi = seed / q; int lo = seed % q; int test = a * lo - r * hi; if (test > 0) seed = test; else seed = test + m; *seed0 = seed; return seed; } ParallelTaskTerminator:: ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set) : _n_threads(n_threads), _queue_set(queue_set), _offered_termination(0) {} bool ParallelTaskTerminator::peek_in_queue_set() { return _queue_set->peek(); } void ParallelTaskTerminator::yield() { assert(_offered_termination <= _n_threads, "Invariant"); os::yield(); } void ParallelTaskTerminator::sleep(uint millis) { assert(_offered_termination <= _n_threads, "Invariant"); os::sleep(Thread::current(), millis, false); } bool ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) { assert(_n_threads > 0, "Initialization is incorrect"); assert(_offered_termination < _n_threads, "Invariant"); Atomic::inc(&_offered_termination); uint yield_count = 0; // Number of hard spin loops done since last yield uint 
hard_spin_count = 0; // Number of iterations in the hard spin loop. uint hard_spin_limit = WorkStealingHardSpins; // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done. // If it is greater than 0, then start with a small number // of spins and increase number with each turn at spinning until // the count of hard spins exceeds WorkStealingSpinToYieldRatio. // Then do a yield() call and start spinning afresh. if (WorkStealingSpinToYieldRatio > 0) { hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio; hard_spin_limit = MAX2(hard_spin_limit, 1U); } // Remember the initial spin limit. uint hard_spin_start = hard_spin_limit; // Loop waiting for all threads to offer termination or // more work. while (true) { assert(_offered_termination <= _n_threads, "Invariant"); // Are all threads offering termination? if (_offered_termination == _n_threads) { return true; } else { // Look for more work. // Periodically sleep() instead of yield() to give threads // waiting on the cores the chance to grab this code if (yield_count <= WorkStealingYieldsBeforeSleep) { // Do a yield or hardspin. For purposes of deciding whether // to sleep, count this as a yield. yield_count++; // Periodically call yield() instead spinning // After WorkStealingSpinToYieldRatio spins, do a yield() call // and reset the counts and starting limit. if (hard_spin_count > WorkStealingSpinToYieldRatio) { yield(); hard_spin_count = 0; hard_spin_limit = hard_spin_start; #ifdef TRACESPINNING _total_yields++; #endif } else { // Hard spin this time // Increase the hard spinning period but only up to a limit. 
hard_spin_limit = MIN2(2*hard_spin_limit, (uint) WorkStealingHardSpins); for (uint j = 0; j < hard_spin_limit; j++) { SpinPause(); } hard_spin_count++; #ifdef TRACESPINNING _total_spins++; #endif } } else { if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() " "thread %d sleeps after %d yields", Thread::current(), yield_count); } yield_count = 0; // A sleep will cause this processor to seek work on another processor's // runqueue, if it has nothing else to run (as opposed to the yield // which may only move the thread to the end of the this processor's // runqueue). sleep(WorkStealingSleepMillis); } #ifdef TRACESPINNING _total_peeks++; #endif if (peek_in_queue_set() || (terminator != NULL && terminator->should_exit_termination())) { Atomic::dec(&_offered_termination); assert(_offered_termination < _n_threads, "Invariant"); return false; } } } } #ifdef TRACESPINNING void ParallelTaskTerminator::print_termination_counts() { gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: " UINT32_FORMAT " Total spins: " UINT32_FORMAT " Total peeks: " UINT32_FORMAT, total_yields(), total_spins(), total_peeks()); } #endif void ParallelTaskTerminator::reset_for_reuse() { if (_offered_termination != 0) { assert(_offered_termination == _n_threads, "Terminator may still be in use"); _offered_termination = 0; } } #ifdef ASSERT bool ObjArrayTask::is_valid() const { return _obj != NULL && _obj->is_objArray() && _index > 0 && _index < objArrayOop(_obj)->length(); } #endif // ASSERT void ParallelTaskTerminator::reset_for_reuse(int n_threads) { reset_for_reuse(); _n_threads = n_threads; }
gpl-2.0
tidatida/coreboot
src/vendorcode/amd/agesa/f14/Proc/Recovery/Mem/NB/ON/mrndcton.c
22
13098
/* $NoKeywords:$ */ /** * @file * * mrndcton.c * * Northbridge DCT support for Ontario Recovery * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: (Proc/Recovery/Mem) * @e \$Revision: 48803 $ @e \$Date: 2011-03-10 20:18:28 -0700 (Thu, 10 Mar 2011) $ * **/ /* ***************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * *************************************************************************** * */ /* *---------------------------------------------------------------------------- * MODULES USED * *---------------------------------------------------------------------------- */ #include "AGESA.h" #include "OptionMemory.h" #include "PlatformMemoryConfiguration.h" #include "Ids.h" #include "amdlib.h" #include "mm.h" #include "mn.h" #include "mt.h" #include "mru.h" #include "mrnon.h" #include "cpuFamilyTranslation.h" #include "cpuCommonF14Utilities.h" #include "Filecode.h" #define FILECODE PROC_RECOVERY_MEM_NB_ON_MRNDCTON_FILECODE /*---------------------------------------------------------------------------- * DEFINITIONS AND MACROS * *---------------------------------------------------------------------------- */ #define RECDEF_CSMASK_REG 0x00003FE0 #define RECDEF_DRAM_BASE_REG 0x00000003 #define MAX_RD_DQS_DLY 0x1F #define DEFAULT_WR_ODT_ON_ON 6 #define DEFAULT_RD_ODT_ON_ON 6 /*---------------------------------------------------------------------------- * TYPEDEFS AND STRUCTURES * *---------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- * PROTOTYPES OF LOCAL FUNCTIONS * *---------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- * EXPORTED FUNCTIONS * *---------------------------------------------------------------------------- */ /* -----------------------------------------------------------------------------*/ /** * * This function gets platform specific config/timing values from the interface layer and * programs them into DCT. 
* * * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * * @return TRUE - An Error value lower than AGESA_ERROR may have occurred * @return FALSE - An Error value greater than or equal to AGESA_ERROR may have occurred */ BOOLEAN MemRecNPlatformSpecON ( IN OUT MEM_NB_BLOCK *NBPtr ) { UINT32 AddrTmgValue; UINT32 DrvStrValue; UINT32 RODTCSLow; UINT32 WODTCSLow; CH_DEF_STRUCT *ChannelPtr; ChannelPtr = NBPtr->ChannelPtr; if (ChannelPtr->SODimmPresent != 0) { // SODIMM if (ChannelPtr->Dimms == 2) { AddrTmgValue = 0x00000039; DrvStrValue = 0x20222323; } else { AddrTmgValue = 0; DrvStrValue = 0x00002223; } } else { // UDIMM if (ChannelPtr->Dimms == 2) { AddrTmgValue = 0x00390039; DrvStrValue = 0x30222322; } else { AddrTmgValue = 0; DrvStrValue = 0x00112222; if (ChannelPtr->DimmDrPresent != 0) { AddrTmgValue = 0x003B0000; } } } MemRecNSetBitFieldNb (NBPtr, BFODCControl, DrvStrValue); MemRecNSetBitFieldNb (NBPtr, BFAddrTmgControl, AddrTmgValue); RODTCSLow = 0; if (ChannelPtr->Dimms == 2) { RODTCSLow = 0x01010404; WODTCSLow = 0x09050605; } else if (NBPtr->ChannelPtr->DimmDrPresent != 0) { WODTCSLow = 0x00000201; if (NBPtr->DimmToBeUsed == 1) { WODTCSLow = 0x08040000; } } else { WODTCSLow = 0x00000001; if (NBPtr->DimmToBeUsed == 1) { WODTCSLow = 0x00040000; } } MemRecNSetBitFieldNb (NBPtr, BFPhyRODTCSLow, RODTCSLow); MemRecNSetBitFieldNb (NBPtr, BFPhyWODTCSLow, WODTCSLow); return TRUE; } /* -----------------------------------------------------------------------------*/ /** * * This function sets the maximum round-trip latency in the system from the processor to the DRAM * devices and back. 
* * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in] MaxRcvEnDly - Maximum receiver enable delay value * */ VOID MemRecNSetMaxLatencyON ( IN OUT MEM_NB_BLOCK *NBPtr, IN UINT16 MaxRcvEnDly ) { UINT32 N; UINT32 T; UINT32 P; UINT32 Px2; UINT32 MemClkPeriod; T = MemRecNTotalSyncComponentsClientNb (NBPtr); // P = P + CEIL(MAX (total delay in DqsRcvEn + RdDqsTime)) P = (MaxRcvEnDly + MAX_RD_DQS_DLY + 31) / 32; MemClkPeriod = 1000000 / DDR800_FREQUENCY; // P = P + 6.5 // T = T + 2586 ps Px2 = (P * 2) + 13; T += 2586; // N = (P/(MemClkFreq * 2) + T) * NclkFreq N = ((((Px2 * MemClkPeriod + 3) / 4) + T) * NBPtr->NBClkFreq + 999999) / 1000000; MemRecNSetBitFieldNb (NBPtr, BFMaxLatency, N); } /* -----------------------------------------------------------------------------*/ /** * * Set Dram ODT for mission mode and write leveling mode. * * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in] OdtMode - Mission mode or write leveling mode * @param[in] ChipSelect - Chip select number * @param[in] TargetCS - Chip select number that is being trained * */ VOID MemRecNSetDramOdtON ( IN OUT MEM_NB_BLOCK *NBPtr, IN ODT_MODE OdtMode, IN UINT8 ChipSelect, IN UINT8 TargetCS ) { UINT8 Dimms; UINT8 DramTerm; UINT8 DramTermDyn; UINT8 WrLvOdt; UINT8 MaxDimmsPerChannel; Dimms = NBPtr->ChannelPtr->Dimms; // Dram nominal termination if (Dimms == 1) { DramTerm = 2; // 120 Ohms DramTermDyn = 0; // Disabled } else { DramTerm = 3; // 40 Ohms DramTermDyn = 2; // 120 Ohms } if (OdtMode == WRITE_LEVELING_MODE) { if (ChipSelect == TargetCS) { if (Dimms >= 2) { DramTerm = DramTermDyn; } MaxDimmsPerChannel = RecGetMaxDimmsPerChannel (NBPtr->RefPtr->PlatformMemoryConfiguration, 0, NBPtr->ChannelPtr->ChannelID); if (MaxDimmsPerChannel == 2) { if (Dimms == 2) { WrLvOdt = 5; } else { // Dimms = 1 if (TargetCS == 0) { WrLvOdt = 1; } else { // TargetCS = 2 WrLvOdt = 4; } } } else { WrLvOdt = 1; } MemRecNSetBitFieldNb (NBPtr, BFWrLvOdt, WrLvOdt); } } MemRecNSetBitFieldNb (NBPtr, 
BFDramTerm, DramTerm); MemRecNSetBitFieldNb (NBPtr, BFDramTermDyn, DramTermDyn); } /* -----------------------------------------------------------------------------*/ /** * * This function programs the memory controller with configuration parameters * * * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * * @return TRUE - An Error value lower than AGESA_ERROR may have occurred * @return FALSE - An Error value greater than or equal to AGESA_ERROR may have occurred */ BOOLEAN MemRecNAutoConfigON ( IN OUT MEM_NB_BLOCK *NBPtr ) { UINT8 Dimm; UINT8 ChipSel; UINT32 CSBase; UINT32 NBClkFreq; UINT8 i; DCT_STRUCT *DCTPtr; CH_DEF_STRUCT *ChannelPtr; DCTPtr = NBPtr->DCTPtr; ChannelPtr = NBPtr->ChannelPtr; // Force NB P-state to NBP0 F14NbPstateInit (DDR800_FREQUENCY, 6, 0, &NBClkFreq, &(NBPtr->MemPtr->StdHeader)); MemRecNSetBitFieldNb (NBPtr, BFNbPsCtrlDis, 1); //Prepare variables for future usage. for (Dimm = 0; Dimm < 2; Dimm++) { if ((ChannelPtr->ChDimmValid & (UINT8) 1 << Dimm) != 0) { DCTPtr->Timings.CsPresent |= (UINT16) 1 << (Dimm * 2); if (((ChannelPtr->DimmDrPresent & (UINT8) 1 << Dimm) == 0) && ((ChannelPtr->DimmQrPresent & (UINT8) 1 << Dimm) == 0)) { continue; } else { DCTPtr->Timings.CsPresent |= (UINT16) 1 << (Dimm * 2 + 1); } } } //Temporarily set all CS Base/Limit registers (corresponding to Dimms exist on a channel) with 256MB size for WL training. CSBase = 0; for (ChipSel = 0; ChipSel < 4; ChipSel++) { if (DCTPtr->Timings.CsPresent & (UINT8) 1 << ChipSel) { CSBase &= (UINT32) ~0x08; //Clear OnDimmMirror bit. if (((ChipSel & 1) != 0) && ((ChannelPtr->DimmMirrorPresent & (UINT8) 1 << (ChipSel >> 1)) != 0)) { CSBase |= (UINT32) 0x08; //Set OnDimmMirror bit. 
} MemRecNSetBitFieldNb (NBPtr, (BFCSBaseAddr0Reg + ChipSel), (CSBase | 0x01)); CSBase += 0x100000; if ((ChipSel & 1) == 0) { MemRecNSetBitFieldNb (NBPtr, (BFCSMask0Reg + (ChipSel >> 1)), RECDEF_CSMASK_REG); } } } MemRecNSetBitFieldNb (NBPtr, BFDramBaseReg0, RECDEF_DRAM_BASE_REG); MemRecNSetBitFieldNb (NBPtr, BFDramLimitReg0, 0x70000); // Use default values for common registers i = 0; while (NBPtr->RecModeDefRegArray[i] != NULL) { MemRecNSetBitFieldNb (NBPtr, NBPtr->RecModeDefRegArray[i], NBPtr->RecModeDefRegArray[i + 1]); i += 2; } //====================================================================== // Build Dram Config Misc Register Value //====================================================================== // // Max out Non-SPD timings MemRecNSetBitFieldNb (NBPtr, BFTwrrdSD, 0xA); MemRecNSetBitFieldNb (NBPtr, BFTrdrdSD, 0x8); MemRecNSetBitFieldNb (NBPtr, BFTwrwrSD, 0x9); MemRecNSetBitFieldNb (NBPtr, BFWrOdtOnDuration, DEFAULT_WR_ODT_ON_ON); MemRecNSetBitFieldNb (NBPtr, BFRdOdtOnDuration, DEFAULT_RD_ODT_ON_ON); MemRecNSetBitFieldNb (NBPtr, BFWrOdtTrnOnDly, 0); MemRecNSetBitFieldNb (NBPtr, BFRdOdtTrnOnDly, 6 - 5); //====================================================================== // DRAM MRS Register, set ODT //====================================================================== MemRecNSetBitFieldNb (NBPtr, BFBurstCtrl, 1); // // Recommended registers setting BEFORE DRAM device initialization and training // MemRecNSetBitFieldNb (NBPtr, BFDisAutoRefresh, 1); MemRecNSetBitFieldNb (NBPtr, BFZqcsInterval, 0); MemRecNSetBitFieldNb (NBPtr, BFRxMaxDurDllNoLock, 0); MemRecNSetBitFieldNb (NBPtr, BFTxMaxDurDllNoLock, 0); MemRecNSetBitFieldNb (NBPtr, BFEnRxPadStandby, 0); MemRecNSetBitFieldNb (NBPtr, BFPrefCpuDis, 1); MemRecNSetBitFieldNb (NBPtr, BFDctWrLimit, 0x1F); MemRecNSetBitFieldNb (NBPtr, BFEnCpuSerRdBehindNpIoWr, 1); MemRecNSetBitFieldNb (NBPtr, BFDbeGskMemClkAlignMode, 0); MemRecNSetBitFieldNb (NBPtr, BFMaxLatency, 0x12); MemRecNSetBitFieldNb (NBPtr, 
BFTraceModeEn, 0); // Enable cut through mode for NB P0 MemRecNSetBitFieldNb (NBPtr, BFDisCutThroughMode, 0); return TRUE; } /* -----------------------------------------------------------------------------*/ /** * * This function overrides the seed for hardware based RcvEn training of Ontario. * * @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK * @param[in,out] *SeedPtr - Pointer to the seed value. * * @return TRUE */ BOOLEAN MemRecNOverrideRcvEnSeedON ( IN OUT MEM_NB_BLOCK *NBPtr, IN OUT VOID *SeedPtr ) { *(UINT16*) SeedPtr = 0x5B; return TRUE; } /*---------------------------------------------------------------------------- * LOCAL FUNCTIONS * *---------------------------------------------------------------------------- */
gpl-2.0
apxii/barebox
arch/arm/boards/mioa701/board.c
22
7344
/*
 * LTM0305A776C LCD panel timings
 *
 * see:
 * - the LTM0305A776C datasheet,
 * - and the PXA27x Programmers' manual
 */
static struct pxafb_videomode mioa701_ltm0305a776c = {
	{
		.pixclock	= 220000,	/* CLK=4.545 MHz */
		.xres		= 240,		/* QVGA portrait panel */
		.yres		= 320,
		.hsync_len	= 4,		/* sync pulse widths, in pixel clocks / lines */
		.vsync_len	= 2,
		.left_margin	= 6,		/* horizontal back/front porch */
		.right_margin	= 4,
		.upper_margin	= 5,		/* vertical back/front porch */
		.lower_margin	= 3,
	},
	.bpp = 16,				/* RGB565 */
};
/*
 * Register the MIO A701's on-board devices: PWM (backlight), framebuffer,
 * MMC, the docg3 flash window, GPIO LEDs and the USB device controller,
 * and set the ARM machine type for Linux booting.
 *
 * Addresses are PXA27x peripheral bases — presumably per the PXA27x
 * memory map; verify against the datasheet if touched.
 */
static int mioa701_devices_init(void)
{
	int i;
	void *docg3_iospace;

	pxa_add_pwm((void *)0x40b00000, 0);
	pxa_add_fb((void *)0x44000000, &mioa701_pxafb_info);
	pxa_add_mmc((void *)0x41100000, DEVICE_ID_DYNAMIC,
		    &mioa701_mmc_info);

	/* Map 8 KiB of the docg3 flash at physical 0x0 into the IO space. */
	docg3_iospace = map_io_sections(0x0, (void *)0xe0000000, 0x2000);
	add_generic_device("docg3", DEVICE_ID_DYNAMIC, NULL,
			   (ulong) docg3_iospace, 0x2000,
			   IORESOURCE_MEM, NULL);
	armlinux_set_architecture(MACH_TYPE_MIOA701);

	for (i = 0; i < ARRAY_SIZE(leds); i++)
		led_gpio_register(&leds[i]);

	add_generic_device("pxa27x-udc", 0, NULL, 0x40600000, 1024,
			   IORESOURCE_MEM, &mioa701_udc_info);

	return 0;
}
AF0, DRIVE_LOW), /* MMC */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, MIO_CFG_IN(GPIO78_SDIO_RO, AF0), MIO_CFG_IN(GPIO15_SDIO_INSERT, AF0), MIO_CFG_OUT(GPIO91_SDIO_EN, AF0, DRIVE_LOW), /* USB */ MIO_CFG_IN(GPIO13_nUSB_DETECT, AF0), MIO_CFG_OUT(GPIO22_USB_ENABLE, AF0, DRIVE_LOW), /* QCI */ GPIO12_CIF_DD_7, GPIO17_CIF_DD_6, GPIO50_CIF_DD_3, GPIO51_CIF_DD_2, GPIO52_CIF_DD_4, GPIO53_CIF_MCLK, GPIO54_CIF_PCLK, GPIO55_CIF_DD_1, GPIO81_CIF_DD_0, GPIO82_CIF_DD_5, GPIO84_CIF_FV, GPIO85_CIF_LV, /* Bluetooth */ MIO_CFG_IN(GPIO14_BT_nACTIVITY, AF0), GPIO44_BTUART_CTS, GPIO42_BTUART_RXD, GPIO45_BTUART_RTS, GPIO43_BTUART_TXD, MIO_CFG_OUT(GPIO83_BT_ON, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO77_BT_UNKNOWN1, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO86_BT_MAYBE_nRESET, AF0, DRIVE_HIGH), /* GPS */ MIO_CFG_OUT(GPIO23_GPS_UNKNOWN1, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO26_GPS_ON, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO27_GPS_RESET, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO106_GPS_UNKNOWN2, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO107_GPS_UNKNOWN3, AF0, DRIVE_LOW), GPIO46_STUART_RXD, GPIO47_STUART_TXD, /* GSM */ MIO_CFG_OUT(GPIO24_GSM_MOD_RESET_CMD, AF0, DRIVE_LOW), MIO_CFG_OUT(GPIO88_GSM_nMOD_ON_CMD, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO90_GSM_nMOD_OFF_CMD, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO114_GSM_nMOD_DTE_UART_STATE, AF0, DRIVE_HIGH), MIO_CFG_IN(GPIO25_GSM_MOD_ON_STATE, AF0), MIO_CFG_IN(GPIO113_GSM_EVENT, AF0) | WAKEUP_ON_EDGE_BOTH, GPIO34_FFUART_RXD, GPIO35_FFUART_CTS, GPIO36_FFUART_DCD, GPIO37_FFUART_DSR, GPIO39_FFUART_TXD, GPIO40_FFUART_DTR, GPIO41_FFUART_RTS, /* Sound */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, GPIO89_AC97_SYSCLK, MIO_CFG_IN(GPIO12_HPJACK_INSERT, AF0), /* Leds */ MIO_CFG_OUT(GPIO10_LED_nCharging, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO97_LED_nBlue, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO98_LED_nOrange, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO82_LED_nVibra, AF0, DRIVE_HIGH), MIO_CFG_OUT(GPIO115_LED_nKeyboard, AF0, 
/*
 * Early core setup: route the MFP pin configuration, then raise the CPU
 * to 520 MHz ("superspeed") and name the board.
 *
 * NOTE(review): the register pokes below follow the PXA27x power-I2C
 * sequence described in the inline comment; confirm the CCCR value
 * 0x20290 against the PXA27x Developer's Manual before changing it.
 */
static int mioa701_coredevice_init(void)
{
	unsigned int cclk;
	/* route pins */
	pxa2xx_mfp_config(ARRAY_AND_SIZE(mioa701_pin_config));

	/*
	 * Put the board in superspeed (520 MHz) to speed-up logo/OS loading.
	 * This requires to command the Maxim 1586 to upgrade core voltage to
	 * 1.475 V, on the power I2C bus (device 0x14).
	 */
	CKEN |= CKEN_PWRI2C;		/* clock the power-manager I2C unit */
	CCCR = CCCR_A | 0x20290;	/* core clock configuration */
	PCFR = PCFR_GPR_EN | PCFR_FVC | PCFR_DC_EN | PCFR_PI2C_EN | PCFR_OPDE;
	PCMD(0) = PCMD_LC | 0x1f;	/* voltage-change command sequence */
	PVCR = 0x14;			/* Maxim 1586 I2C slave address */
	cclk = 0x0b;
	/* Write the CLKCFG coprocessor register to trigger the freq change. */
	asm volatile("mcr p14, 0, %0, c6, c0, 0 @ set CCLK"
		     : : "r" (cclk) : "cc");
	barebox_set_model("Scoter Mitac Mio A701");
	barebox_set_hostname("mioa701");

	return 0;
}
gpl-2.0
wetek-enigma/linux-wetek-3.14.y
drivers/firewire/core-device.c
790
34771
/* * Device probing and sysfs code. * * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/bug.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/idr.h> #include <linux/jiffies.h> #include <linux/kobject.h> #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <asm/byteorder.h> #include "core.h" void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p) { ci->p = p + 1; ci->end = ci->p + (p[0] >> 16); } EXPORT_SYMBOL(fw_csr_iterator_init); int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value) { *key = *ci->p >> 24; *value = *ci->p & 0xffffff; return ci->p++ < ci->end; } EXPORT_SYMBOL(fw_csr_iterator_next); static const u32 *search_leaf(const u32 *directory, int search_key) { struct fw_csr_iterator ci; int last_key = 0, key, value; fw_csr_iterator_init(&ci, directory); while (fw_csr_iterator_next(&ci, &key, &value)) { if (last_key == 
search_key && key == (CSR_DESCRIPTOR | CSR_LEAF)) return ci.p - 1 + value; last_key = key; } return NULL; } static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) { unsigned int quadlets, i; char c; if (!size || !buf) return -EINVAL; quadlets = min(block[0] >> 16, 256U); if (quadlets < 2) return -ENODATA; if (block[1] != 0 || block[2] != 0) /* unknown language/character set */ return -ENODATA; block += 3; quadlets -= 2; for (i = 0; i < quadlets * 4 && i < size - 1; i++) { c = block[i / 4] >> (24 - 8 * (i % 4)); if (c == '\0') break; buf[i] = c; } buf[i] = '\0'; return i; } /** * fw_csr_string() - reads a string from the configuration ROM * @directory: e.g. root directory or unit directory * @key: the key of the preceding directory entry * @buf: where to put the string * @size: size of @buf, in bytes * * The string is taken from a minimal ASCII text descriptor leaf after * the immediate entry with @key. The string is zero-terminated. * Returns strlen(buf) or a negative error code. 
*/ int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) { const u32 *leaf = search_leaf(directory, key); if (!leaf) return -ENOENT; return textual_leaf_to_string(leaf, buf, size); } EXPORT_SYMBOL(fw_csr_string); static void get_ids(const u32 *directory, int *id) { struct fw_csr_iterator ci; int key, value; fw_csr_iterator_init(&ci, directory); while (fw_csr_iterator_next(&ci, &key, &value)) { switch (key) { case CSR_VENDOR: id[0] = value; break; case CSR_MODEL: id[1] = value; break; case CSR_SPECIFIER_ID: id[2] = value; break; case CSR_VERSION: id[3] = value; break; } } } static void get_modalias_ids(struct fw_unit *unit, int *id) { get_ids(&fw_parent_device(unit)->config_rom[5], id); get_ids(unit->directory, id); } static bool match_ids(const struct ieee1394_device_id *id_table, int *id) { int match = 0; if (id[0] == id_table->vendor_id) match |= IEEE1394_MATCH_VENDOR_ID; if (id[1] == id_table->model_id) match |= IEEE1394_MATCH_MODEL_ID; if (id[2] == id_table->specifier_id) match |= IEEE1394_MATCH_SPECIFIER_ID; if (id[3] == id_table->version) match |= IEEE1394_MATCH_VERSION; return (match & id_table->match_flags) == id_table->match_flags; } static const struct ieee1394_device_id *unit_match(struct device *dev, struct device_driver *drv) { const struct ieee1394_device_id *id_table = container_of(drv, struct fw_driver, driver)->id_table; int id[] = {0, 0, 0, 0}; get_modalias_ids(fw_unit(dev), id); for (; id_table->match_flags != 0; id_table++) if (match_ids(id_table, id)) return id_table; return NULL; } static bool is_fw_unit(struct device *dev); static int fw_unit_match(struct device *dev, struct device_driver *drv) { /* We only allow binding to fw_units. 
*/ return is_fw_unit(dev) && unit_match(dev, drv) != NULL; } static int fw_unit_probe(struct device *dev) { struct fw_driver *driver = container_of(dev->driver, struct fw_driver, driver); return driver->probe(fw_unit(dev), unit_match(dev, dev->driver)); } static int fw_unit_remove(struct device *dev) { struct fw_driver *driver = container_of(dev->driver, struct fw_driver, driver); return driver->remove(fw_unit(dev)), 0; } static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) { int id[] = {0, 0, 0, 0}; get_modalias_ids(unit, id); return snprintf(buffer, buffer_size, "ieee1394:ven%08Xmo%08Xsp%08Xver%08X", id[0], id[1], id[2], id[3]); } static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) { struct fw_unit *unit = fw_unit(dev); char modalias[64]; get_modalias(unit, modalias, sizeof(modalias)); if (add_uevent_var(env, "MODALIAS=%s", modalias)) return -ENOMEM; return 0; } struct bus_type fw_bus_type = { .name = "firewire", .match = fw_unit_match, .probe = fw_unit_probe, .remove = fw_unit_remove, }; EXPORT_SYMBOL(fw_bus_type); int fw_device_enable_phys_dma(struct fw_device *device) { int generation = device->generation; /* device->node_id, accessed below, must not be older than generation */ smp_rmb(); return device->card->driver->enable_phys_dma(device->card, device->node_id, generation); } EXPORT_SYMBOL(fw_device_enable_phys_dma); struct config_rom_attribute { struct device_attribute attr; u32 key; }; static ssize_t show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); struct fw_csr_iterator ci; const u32 *dir; int key, value, ret = -ENOENT; down_read(&fw_device_rwsem); if (is_fw_unit(dev)) dir = fw_unit(dev)->directory; else dir = fw_device(dev)->config_rom + 5; fw_csr_iterator_init(&ci, dir); while (fw_csr_iterator_next(&ci, &key, &value)) if (attr->key == key) { ret = snprintf(buf, buf ? 
/*
 * sysfs show() for textual-leaf attributes (vendor_name, model_name, ...).
 *
 * Reads the string from the unit directory (for fw_unit devices) or the
 * root directory (for fw_device devices) under fw_device_rwsem, strips
 * trailing whitespace and appends a newline.
 *
 * Called with buf == NULL by init_fw_attribute_group() as a pure
 * existence probe; dummy_buf gives fw_csr_string() somewhere to write
 * in that case.
 */
static ssize_t show_text_leaf(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	const u32 *dir;
	size_t bufsize;
	char dummy_buf[2];
	int ret;

	/* config_rom / unit directory must not change while we read it */
	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	if (buf) {
		bufsize = PAGE_SIZE - 1;	/* leave room for the '\n' */
	} else {
		buf = dummy_buf;		/* probe mode, see above */
		bufsize = 1;
	}

	ret = fw_csr_string(dir, attr->key, buf, bufsize);

	if (ret >= 0) {
		/* Strip trailing whitespace and add newline. */
		while (ret > 0 && isspace(buf[ret - 1]))
			ret--;
		strcpy(buf + ret, "\n");
		ret++;
	}

	up_read(&fw_device_rwsem);

	return ret;
}
modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_unit *unit = fw_unit(dev); int length; length = get_modalias(unit, buf, PAGE_SIZE); strcpy(buf + length, "\n"); return length + 1; } static ssize_t rom_index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); return snprintf(buf, PAGE_SIZE, "%d\n", (int)(unit->directory - device->config_rom)); } static struct device_attribute fw_unit_attributes[] = { __ATTR_RO(modalias), __ATTR_RO(rom_index), __ATTR_NULL, }; static ssize_t config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); size_t length; down_read(&fw_device_rwsem); length = device->config_rom_length * 4; memcpy(buf, device->config_rom, length); up_read(&fw_device_rwsem); return length; } static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); int ret; down_read(&fw_device_rwsem); ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); up_read(&fw_device_rwsem); return ret; } static ssize_t is_local_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); return sprintf(buf, "%u\n", device->is_local); } static int units_sprintf(char *buf, const u32 *directory) { struct fw_csr_iterator ci; int key, value; int specifier_id = 0; int version = 0; fw_csr_iterator_init(&ci, directory); while (fw_csr_iterator_next(&ci, &key, &value)) { switch (key) { case CSR_SPECIFIER_ID: specifier_id = value; break; case CSR_VERSION: version = value; break; } } return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version); } static ssize_t units_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); struct fw_csr_iterator ci; int key, value, i = 0; 
/*
 * Read one config-ROM quadlet at @index from the device.
 *
 * Retries up to nine times on RCODE_BUSY with a growing back-off
 * (10, 20, ... 90 ms).  On return *data has been byte-swapped to CPU
 * order regardless of the rcode — callers must check the return value
 * before trusting it.  Returns the final transaction rcode.
 */
static int read_rom(struct fw_device *device,
		    int generation, int index, u32 *data)
{
	u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
	int i, rcode;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	for (i = 10; i < 100; i += 10) {
		rcode = fw_run_transaction(device->card,
				TCODE_READ_QUADLET_REQUEST, device->node_id,
				generation, device->max_speed, offset, data, 4);
		if (rcode != RCODE_BUSY)
			break;
		msleep(i);
	}
	be32_to_cpus(data);

	return rcode;
}
*/ for (i = 0; i < 5; i++) { ret = read_rom(device, generation, i, &rom[i]); if (ret != RCODE_COMPLETE) goto out; /* * As per IEEE1212 7.2, during initialization, devices can * reply with a 0 for the first quadlet of the config * rom to indicate that they are booting (for example, * if the firmware is on the disk of a external * harddisk). In that case we just fail, and the * retry mechanism will try again later. */ if (i == 0 && rom[i] == 0) { ret = RCODE_BUSY; goto out; } } device->max_speed = device->node->max_speed; /* * Determine the speed of * - devices with link speed less than PHY speed, * - devices with 1394b PHY (unless only connected to 1394a PHYs), * - all devices if there are 1394b repeaters. * Note, we cannot use the bus info block's link_spd as starting point * because some buggy firmwares set it lower than necessary and because * 1394-1995 nodes do not have the field. */ if ((rom[2] & 0x7) < device->max_speed || device->max_speed == SCODE_BETA || card->beta_repeaters_present) { u32 dummy; /* for S1600 and S3200 */ if (device->max_speed == SCODE_BETA) device->max_speed = card->link_speed; while (device->max_speed > SCODE_100) { if (read_rom(device, generation, 0, &dummy) == RCODE_COMPLETE) break; device->max_speed--; } } /* * Now parse the config rom. The config rom is a recursive * directory structure so we parse it using a stack of * references to the blocks that make up the structure. We * push a reference to the root directory on the stack to * start things off. */ length = i; sp = 0; stack[sp++] = 0xc0000005; while (sp > 0) { /* * Pop the next block reference of the stack. The * lower 24 bits is the offset into the config rom, * the upper 8 bits are the type of the reference the * block. */ key = stack[--sp]; i = key & 0xffffff; if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) { ret = -ENXIO; goto out; } /* Read header quadlet for the block to get the length. 
*/ ret = read_rom(device, generation, i, &rom[i]); if (ret != RCODE_COMPLETE) goto out; end = i + (rom[i] >> 16) + 1; if (end > MAX_CONFIG_ROM_SIZE) { /* * This block extends outside the config ROM which is * a firmware bug. Ignore this whole block, i.e. * simply set a fake block length of 0. */ fw_err(card, "skipped invalid ROM block %x at %llx\n", rom[i], i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); rom[i] = 0; end = i; } i++; /* * Now read in the block. If this is a directory * block, check the entries as we read them to see if * it references another block, and push it in that case. */ for (; i < end; i++) { ret = read_rom(device, generation, i, &rom[i]); if (ret != RCODE_COMPLETE) goto out; if ((key >> 30) != 3 || (rom[i] >> 30) < 2) continue; /* * Offset points outside the ROM. May be a firmware * bug or an Extended ROM entry (IEEE 1212-2001 clause * 7.7.18). Simply overwrite this pointer here by a * fake immediate entry so that later iterators over * the ROM don't have to check offsets all the time. 
*/ if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) { fw_err(card, "skipped unsupported ROM entry %x at %llx\n", rom[i], i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); rom[i] = 0; continue; } stack[sp++] = i + rom[i]; } if (length < i) length = i; } old_rom = device->config_rom; new_rom = kmemdup(rom, length * 4, GFP_KERNEL); if (new_rom == NULL) { ret = -ENOMEM; goto out; } down_write(&fw_device_rwsem); device->config_rom = new_rom; device->config_rom_length = length; up_write(&fw_device_rwsem); kfree(old_rom); ret = RCODE_COMPLETE; device->max_rec = rom[2] >> 12 & 0xf; device->cmc = rom[2] >> 30 & 1; device->irmc = rom[2] >> 31 & 1; out: kfree(rom); return ret; } static void fw_unit_release(struct device *dev) { struct fw_unit *unit = fw_unit(dev); fw_device_put(fw_parent_device(unit)); kfree(unit); } static struct device_type fw_unit_type = { .uevent = fw_unit_uevent, .release = fw_unit_release, }; static bool is_fw_unit(struct device *dev) { return dev->type == &fw_unit_type; } static void create_units(struct fw_device *device) { struct fw_csr_iterator ci; struct fw_unit *unit; int key, value, i; i = 0; fw_csr_iterator_init(&ci, &device->config_rom[5]); while (fw_csr_iterator_next(&ci, &key, &value)) { if (key != (CSR_UNIT | CSR_DIRECTORY)) continue; /* * Get the address of the unit directory and try to * match the drivers id_tables against it. 
/*
 * Look up a fw_device by its character-device number.
 *
 * The idr is keyed by minor number; the lookup and the reference bump
 * happen together under fw_device_rwsem so the device cannot be freed
 * in between.  Returns a referenced device (caller is expected to drop
 * it with fw_device_put()) or NULL if no device has that minor.
 */
struct fw_device *fw_device_get_by_devt(dev_t devt)
{
	struct fw_device *device;

	down_read(&fw_device_rwsem);
	device = idr_find(&fw_device_idr, MINOR(devt));
	if (device)
		fw_device_get(device);
	up_read(&fw_device_rwsem);

	return device;
}
/*
 * Workqueue handler that tears down a fw_device whose node went away.
 *
 * Teardown is deferred while the last bus reset is recent and the card
 * is still registered, giving lookup_existing_device() a window to
 * revive the device if it reappears.  The atomic cmpxchg makes the
 * GONE -> SHUTDOWN transition happen exactly once, so a revived device
 * (state flipped back to RUNNING) is left alone.
 */
static void fw_device_shutdown(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor = MINOR(device->device.devt);

	/* Too soon after a bus reset? Re-queue and try again later. */
	if (time_before64(get_jiffies_64(),
			  device->card->reset_jiffies + SHUTDOWN_DELAY)
	    && !list_empty(&device->card->link)) {
		fw_schedule_device_work(device, SHUTDOWN_DELAY);
		return;
	}

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
		return;

	fw_device_cdev_remove(device);
	device_for_each_child(&device->device, NULL, shutdown_unit);
	device_unregister(&device->device);

	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);

	/* drop the reference taken when the minor was allocated */
	fw_device_put(device);
}
*/ spin_lock_irqsave(&card->lock, flags); device->node->data = NULL; spin_unlock_irqrestore(&card->lock, flags); fw_node_put(device->node); kfree(device->config_rom); kfree(device); fw_card_put(card); } static struct device_type fw_device_type = { .release = fw_device_release, }; static bool is_fw_device(struct device *dev) { return dev->type == &fw_device_type; } static int update_unit(struct device *dev, void *data) { struct fw_unit *unit = fw_unit(dev); struct fw_driver *driver = (struct fw_driver *)dev->driver; if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { device_lock(dev); driver->update(unit); device_unlock(dev); } return 0; } static void fw_device_update(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); fw_device_cdev_update(device); device_for_each_child(&device->device, NULL, update_unit); } /* * If a device was pending for deletion because its node went away but its * bus info block and root directory header matches that of a newly discovered * device, revive the existing fw_device. * The newly allocated fw_device becomes obsolete instead. 
/*
 * device_find_child() callback: try to revive an existing, GONE
 * fw_device (@dev/old) using a newly discovered one (@data/new).
 *
 * A match requires identical bus info block + root directory header
 * (first 6 quadlets) AND winning the GONE -> RUNNING state transition.
 * On a match the two devices swap their topology nodes so the old
 * (revived) device adopts the new node, and the new device — which the
 * caller will discard — inherits the obsolete node.  Returns 1 on
 * revival, 0 otherwise.
 */
static int lookup_existing_device(struct device *dev, void *data)
{
	struct fw_device *old = fw_device(dev);
	struct fw_device *new = data;
	struct fw_card *card = new->card;
	int match = 0;

	if (!is_fw_device(dev))
		return 0;

	down_read(&fw_device_rwsem); /* serialize config_rom access */
	spin_lock_irq(&card->lock);  /* serialize node access */

	if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
	    atomic_cmpxchg(&old->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		struct fw_node *current_node = new->node;
		struct fw_node *obsolete_node = old->node;

		/* swap nodes and fix up both back-pointers */
		new->node = obsolete_node;
		new->node->data = new;
		old->node = current_node;
		old->node->data = old;

		old->max_speed = new->max_speed;
		old->node_id = current_node->node_id;
		smp_wmb();  /* update node_id before generation */
		old->generation = card->generation;
		old->config_rom_retries = 0;
		fw_notice(card, "rediscovered device %s\n", dev_name(dev));

		old->workfn = fw_device_update;
		fw_schedule_device_work(old, 0);

		if (current_node == card->root_node)
			fw_schedule_bm_work(card, 0);

		match = 1;
	}

	spin_unlock_irq(&card->lock);
	up_read(&fw_device_rwsem);

	return match;
}
*/ if (device->bc_implemented == BC_UNKNOWN) { rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, device->node_id, generation, device->max_speed, CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, &data, 4); switch (rcode) { case RCODE_COMPLETE: if (data & cpu_to_be32(1 << 31)) { device->bc_implemented = BC_IMPLEMENTED; break; } /* else fall through to case address error */ case RCODE_ADDRESS_ERROR: device->bc_implemented = BC_UNIMPLEMENTED; } } if (device->bc_implemented == BC_IMPLEMENTED) { data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | BROADCAST_CHANNEL_VALID); fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, device->node_id, generation, device->max_speed, CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, &data, 4); } } int fw_device_set_broadcast_channel(struct device *dev, void *gen) { if (is_fw_device(dev)) set_broadcast_channel(fw_device(dev), (long)gen); return 0; } static void fw_device_init(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); struct fw_card *card = device->card; struct device *revived_dev; int minor, ret; /* * All failure paths here set node->data to NULL, so that we * don't try to do device_for_each_child() on a kfree()'d * device. 
*/ ret = read_config_rom(device, device->generation); if (ret != RCODE_COMPLETE) { if (device->config_rom_retries < MAX_RETRIES && atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { device->config_rom_retries++; fw_schedule_device_work(device, RETRY_DELAY); } else { if (device->node->link_on) fw_notice(card, "giving up on node %x: reading config rom failed: %s\n", device->node_id, fw_rcode_string(ret)); if (device->node == card->root_node) fw_schedule_bm_work(card, 0); fw_device_release(&device->device); } return; } revived_dev = device_find_child(card->device, device, lookup_existing_device); if (revived_dev) { put_device(revived_dev); fw_device_release(&device->device); return; } device_initialize(&device->device); fw_device_get(device); down_write(&fw_device_rwsem); minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS, GFP_KERNEL); up_write(&fw_device_rwsem); if (minor < 0) goto error; device->device.bus = &fw_bus_type; device->device.type = &fw_device_type; device->device.parent = card->device; device->device.devt = MKDEV(fw_cdev_major, minor); dev_set_name(&device->device, "fw%d", minor); BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) < ARRAY_SIZE(fw_device_attributes) + ARRAY_SIZE(config_rom_attributes)); init_fw_attribute_group(&device->device, fw_device_attributes, &device->attribute_group); if (device_add(&device->device)) { fw_err(card, "failed to add device\n"); goto error_with_cdev; } create_units(device); /* * Transition the device to running state. If it got pulled * out from under us while we did the intialization work, we * have to shut down the device again here. Normally, though, * fw_node_event will be responsible for shutting it down when * necessary. We have to use the atomic cmpxchg here to avoid * racing with the FW_NODE_DESTROYED case in * fw_node_event(). 
*/ if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); } else { fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", dev_name(&device->device), device->config_rom[3], device->config_rom[4], 1 << device->max_speed); device->config_rom_retries = 0; set_broadcast_channel(device, device->generation); add_device_randomness(&device->config_rom[3], 8); } /* * Reschedule the IRM work if we just finished reading the * root node config rom. If this races with a bus reset we * just end up running the IRM work a couple of extra times - * pretty harmless. */ if (device->node == card->root_node) fw_schedule_bm_work(card, 0); return; error_with_cdev: down_write(&fw_device_rwsem); idr_remove(&fw_device_idr, minor); up_write(&fw_device_rwsem); error: fw_device_put(device); /* fw_device_idr's reference */ put_device(&device->device); /* our reference */ } /* Reread and compare bus info block and header of root directory */ static int reread_config_rom(struct fw_device *device, int generation, bool *changed) { u32 q; int i, rcode; for (i = 0; i < 6; i++) { rcode = read_rom(device, generation, i, &q); if (rcode != RCODE_COMPLETE) return rcode; if (i == 0 && q == 0) /* inaccessible (see read_config_rom); retry later */ return RCODE_BUSY; if (q != device->config_rom[i]) { *changed = true; return RCODE_COMPLETE; } } *changed = false; return RCODE_COMPLETE; } static void fw_device_refresh(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); struct fw_card *card = device->card; int ret, node_id = device->node_id; bool changed; ret = reread_config_rom(device, device->generation, &changed); if (ret != RCODE_COMPLETE) goto failed_config_rom; if (!changed) { if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) goto gone; fw_device_update(work); 
device->config_rom_retries = 0; goto out; } /* * Something changed. We keep things simple and don't investigate * further. We just destroy all previous units and create new ones. */ device_for_each_child(&device->device, NULL, shutdown_unit); ret = read_config_rom(device, device->generation); if (ret != RCODE_COMPLETE) goto failed_config_rom; fw_device_cdev_update(device); create_units(device); /* Userspace may want to re-read attributes. */ kobject_uevent(&device->device.kobj, KOBJ_CHANGE); if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) goto gone; fw_notice(card, "refreshed device %s\n", dev_name(&device->device)); device->config_rom_retries = 0; goto out; failed_config_rom: if (device->config_rom_retries < MAX_RETRIES && atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { device->config_rom_retries++; fw_schedule_device_work(device, RETRY_DELAY); return; } fw_notice(card, "giving up on refresh of device %s: %s\n", dev_name(&device->device), fw_rcode_string(ret)); gone: atomic_set(&device->state, FW_DEVICE_GONE); device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); out: if (node_id == card->root_node->node_id) fw_schedule_bm_work(card, 0); } static void fw_device_workfn(struct work_struct *work) { struct fw_device *device = container_of(to_delayed_work(work), struct fw_device, work); device->workfn(work); } void fw_node_event(struct fw_card *card, struct fw_node *node, int event) { struct fw_device *device; switch (event) { case FW_NODE_CREATED: /* * Attempt to scan the node, regardless whether its self ID has * the L (link active) flag set or not. Some broken devices * send L=0 but have an up-and-running link; others send L=1 * without actually having a link. */ create: device = kzalloc(sizeof(*device), GFP_ATOMIC); if (device == NULL) break; /* * Do minimal intialization of the device here, the * rest will happen in fw_device_init(). 
* * Attention: A lot of things, even fw_device_get(), * cannot be done before fw_device_init() finished! * You can basically just check device->state and * schedule work until then, but only while holding * card->lock. */ atomic_set(&device->state, FW_DEVICE_INITIALIZING); device->card = fw_card_get(card); device->node = fw_node_get(node); device->node_id = node->node_id; device->generation = card->generation; device->is_local = node == card->local_node; mutex_init(&device->client_list_mutex); INIT_LIST_HEAD(&device->client_list); /* * Set the node data to point back to this device so * FW_NODE_UPDATED callbacks can update the node_id * and generation for the device. */ node->data = device; /* * Many devices are slow to respond after bus resets, * especially if they are bus powered and go through * power-up after getting plugged in. We schedule the * first config rom scan half a second after bus reset. */ device->workfn = fw_device_init; INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_schedule_device_work(device, INITIAL_DELAY); break; case FW_NODE_INITIATED_RESET: case FW_NODE_LINK_ON: device = node->data; if (device == NULL) goto create; device->node_id = node->node_id; smp_wmb(); /* update node_id before generation */ device->generation = card->generation; if (atomic_cmpxchg(&device->state, FW_DEVICE_RUNNING, FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { device->workfn = fw_device_refresh; fw_schedule_device_work(device, device->is_local ? 0 : INITIAL_DELAY); } break; case FW_NODE_UPDATED: device = node->data; if (device == NULL) break; device->node_id = node->node_id; smp_wmb(); /* update node_id before generation */ device->generation = card->generation; if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { device->workfn = fw_device_update; fw_schedule_device_work(device, 0); } break; case FW_NODE_DESTROYED: case FW_NODE_LINK_OFF: if (!node->data) break; /* * Destroy the device associated with the node. 
There * are two cases here: either the device is fully * initialized (FW_DEVICE_RUNNING) or we're in the * process of reading its config rom * (FW_DEVICE_INITIALIZING). If it is fully * initialized we can reuse device->work to schedule a * full fw_device_shutdown(). If not, there's work * scheduled to read it's config rom, and we just put * the device in shutdown state to have that code fail * to create the device. */ device = node->data; if (atomic_xchg(&device->state, FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { device->workfn = fw_device_shutdown; fw_schedule_device_work(device, list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); } break; } }
gpl-2.0
InfinitiveOS-Devices/android_kernel_motorola_msm8226
drivers/scsi/virtio_scsi.c
1558
14407
/* * Virtio SCSI HBA driver * * Copyright IBM Corp. 2010 * Copyright Red Hat, Inc. 2011 * * Authors: * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> * Paolo Bonzini <pbonzini@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 /* Command queue element */ struct virtio_scsi_cmd { struct scsi_cmnd *sc; struct completion *comp; union { struct virtio_scsi_cmd_req cmd; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; union { struct virtio_scsi_cmd_resp cmd; struct virtio_scsi_ctrl_tmf_resp tmf; struct virtio_scsi_ctrl_an_resp an; struct virtio_scsi_event evt; } resp; } ____cacheline_aligned_in_smp; /* Driver instance state */ struct virtio_scsi { /* Protects ctrl_vq, req_vq and sg[] */ spinlock_t vq_lock; struct virtio_device *vdev; struct virtqueue *ctrl_vq; struct virtqueue *event_vq; struct virtqueue *req_vq; /* For sglist construction when adding commands to the virtqueue. */ struct scatterlist sg[]; }; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) { return vdev->priv; } static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) { if (!resid) return; if (!scsi_bidi_cmnd(sc)) { scsi_set_resid(sc, resid); return; } scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); scsi_out(sc)->resid = resid - scsi_in(sc)->resid; } /** * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done * * Called with vq_lock held. 
*/ static void virtscsi_complete_cmd(void *buf) { struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", sc, resp->response, resp->status, resp->sense_len); sc->result = resp->status; virtscsi_compute_resid(sc, resp->resid); switch (resp->response) { case VIRTIO_SCSI_S_OK: set_host_byte(sc, DID_OK); break; case VIRTIO_SCSI_S_OVERRUN: set_host_byte(sc, DID_ERROR); break; case VIRTIO_SCSI_S_ABORTED: set_host_byte(sc, DID_ABORT); break; case VIRTIO_SCSI_S_BAD_TARGET: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_RESET: set_host_byte(sc, DID_RESET); break; case VIRTIO_SCSI_S_BUSY: set_host_byte(sc, DID_BUS_BUSY); break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: set_host_byte(sc, DID_TRANSPORT_DISRUPTED); break; case VIRTIO_SCSI_S_TARGET_FAILURE: set_host_byte(sc, DID_TARGET_FAILURE); break; case VIRTIO_SCSI_S_NEXUS_FAILURE: set_host_byte(sc, DID_NEXUS_FAILURE); break; default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); /* fall through */ case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; } WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE); if (sc->sense_buffer) { memcpy(sc->sense_buffer, resp->sense, min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE)); if (resp->sense_len) set_driver_byte(sc, DRIVER_SENSE); } mempool_free(cmd, virtscsi_cmd_pool); sc->scsi_done(sc); } static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf)) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); void *buf; unsigned long flags; unsigned int len; spin_lock_irqsave(&vscsi->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(buf); } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&vscsi->vq_lock, flags); } static void virtscsi_req_done(struct virtqueue *vq) { 
virtscsi_vq_done(vq, virtscsi_complete_cmd); }; static void virtscsi_complete_free(void *buf) { struct virtio_scsi_cmd *cmd = buf; if (cmd->comp) complete_all(cmd->comp); else mempool_free(cmd, virtscsi_cmd_pool); } static void virtscsi_ctrl_done(struct virtqueue *vq) { virtscsi_vq_done(vq, virtscsi_complete_free); }; static void virtscsi_event_done(struct virtqueue *vq) { virtscsi_vq_done(vq, virtscsi_complete_free); }; static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, struct scsi_data_buffer *sdb) { struct sg_table *table = &sdb->table; struct scatterlist *sg_elem; unsigned int idx = *p_idx; int i; for_each_sg(table->sgl, sg_elem, table->nents, i) sg[idx++] = *sg_elem; *p_idx = idx; } /** * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist * @vscsi : virtio_scsi state * @cmd : command structure * @out_num : number of read-only elements * @in_num : number of write-only elements * @req_size : size of the request buffer * @resp_size : size of the response buffer * * Called with vq_lock held. */ static void virtscsi_map_cmd(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd, unsigned *out_num, unsigned *in_num, size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; struct scatterlist *sg = vscsi->sg; unsigned int idx = 0; if (sc) { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? */ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); } /* Request header. */ sg_set_buf(&sg[idx++], &cmd->req, req_size); /* Data-out buffer. */ if (sc && sc->sc_data_direction != DMA_FROM_DEVICE) virtscsi_map_sgl(sg, &idx, scsi_out(sc)); *out_num = idx; /* Response header. 
*/ sg_set_buf(&sg[idx++], &cmd->resp, resp_size); /* Data-in buffer */ if (sc && sc->sc_data_direction != DMA_TO_DEVICE) virtscsi_map_sgl(sg, &idx, scsi_in(sc)); *in_num = idx - *out_num; } static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size, gfp_t gfp) { unsigned int out_num, in_num; unsigned long flags; int ret; spin_lock_irqsave(&vscsi->vq_lock, flags); virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size); ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp); if (ret >= 0) virtqueue_kick(vq); spin_unlock_irqrestore(&vscsi->vq_lock, flags); return ret; } static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sh); struct virtio_scsi_cmd *cmd; int ret; dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); ret = SCSI_MLQUEUE_HOST_BUSY; cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); if (!cmd) goto out; memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; cmd->req.cmd = (struct virtio_scsi_cmd_req){ .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, .tag = (unsigned long)sc, .task_attr = VIRTIO_SCSI_S_SIMPLE, .prio = 0, .crn = 0, }; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd, sizeof cmd->req.cmd, sizeof cmd->resp.cmd, GFP_ATOMIC) >= 0) ret = 0; out: return ret; } static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(comp); int ret = FAILED; cmd->comp = &comp; if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, sizeof cmd->req.tmf, sizeof cmd->resp.tmf, GFP_NOIO) < 0) goto out; wait_for_completion(&comp); if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; out: mempool_free(cmd, 
virtscsi_cmd_pool); return ret; } static int virtscsi_device_reset(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; sdev_printk(KERN_INFO, sc->device, "device reset\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET, .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, }; return virtscsi_tmf(vscsi, cmd); } static int virtscsi_abort(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; scmd_printk(KERN_INFO, sc, "abort\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, .tag = (unsigned long)sc, }; return virtscsi_tmf(vscsi, cmd); } static struct scsi_host_template virtscsi_host_template = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", .proc_name = "virtio_scsi", .queuecommand = virtscsi_queuecommand, .this_id = -1, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, .can_queue = 1024, .dma_boundary = UINT_MAX, .use_clustering = ENABLE_CLUSTERING, }; #define virtscsi_config_get(vdev, fld) \ ({ \ typeof(((struct virtio_scsi_config *)0)->fld) __val; \ vdev->config->get(vdev, \ offsetof(struct virtio_scsi_config, fld), \ &__val, sizeof(__val)); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ (void)({ \ typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ vdev->config->set(vdev, \ offsetof(struct virtio_scsi_config, fld), \ &__val, 
sizeof(__val)); \ }) static int virtscsi_init(struct virtio_device *vdev, struct virtio_scsi *vscsi) { int err; struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { virtscsi_ctrl_done, virtscsi_event_done, virtscsi_req_done }; const char *names[] = { "control", "event", "request" }; /* Discover virtqueues and write information to configuration. */ err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names); if (err) return err; vscsi->ctrl_vq = vqs[0]; vscsi->event_vq = vqs[1]; vscsi->req_vq = vqs[2]; virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); return 0; } static int __devinit virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; int err; u32 sg_elems; u32 cmd_per_lun; /* We need to know how many segments before we allocate. * We need an extra sg elements at head and tail. */ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; /* Allocate memory and link the structs together. */ shost = scsi_host_alloc(&virtscsi_host_template, sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2)); if (!shost) return -ENOMEM; shost->sg_tablesize = sg_elems; vscsi = shost_priv(shost); vscsi->vdev = vdev; vdev->priv = shost; /* Random initializations. 
*/ spin_lock_init(&vscsi->vq_lock); sg_init_table(vscsi->sg, sg_elems + 2); err = virtscsi_init(vdev, vscsi); if (err) goto virtscsi_init_failed; cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1; shost->max_id = virtscsi_config_get(vdev, max_target) + 1; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; scsi_scan_host(shost); return 0; scsi_add_host_failed: vdev->config->del_vqs(vdev); virtscsi_init_failed: scsi_host_put(shost); return err; } static void virtscsi_remove_vqs(struct virtio_device *vdev) { /* Stop all the virtqueues. */ vdev->config->reset(vdev); vdev->config->del_vqs(vdev); } static void __devexit virtscsi_remove(struct virtio_device *vdev) { struct Scsi_Host *shost = virtio_scsi_host(vdev); scsi_remove_host(shost); virtscsi_remove_vqs(vdev); scsi_host_put(shost); } #ifdef CONFIG_PM static int virtscsi_freeze(struct virtio_device *vdev) { virtscsi_remove_vqs(vdev); return 0; } static int virtscsi_restore(struct virtio_device *vdev) { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); return virtscsi_init(vdev, vscsi); } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, { 0 }, }; static struct virtio_driver virtio_scsi_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtscsi_probe, #ifdef CONFIG_PM .freeze = virtscsi_freeze, .restore = virtscsi_restore, #endif .remove = __devexit_p(virtscsi_remove), }; static int __init init(void) { int ret = -ENOMEM; virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); if (!virtscsi_cmd_cache) { printk(KERN_ERR "kmem_cache_create() for " "virtscsi_cmd_cache failed\n"); goto error; } 
virtscsi_cmd_pool = mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, virtscsi_cmd_cache); if (!virtscsi_cmd_pool) { printk(KERN_ERR "mempool_create() for" "virtscsi_cmd_pool failed\n"); goto error; } ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; return 0; error: if (virtscsi_cmd_pool) { mempool_destroy(virtscsi_cmd_pool); virtscsi_cmd_pool = NULL; } if (virtscsi_cmd_cache) { kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; } return ret; } static void __exit fini(void) { unregister_virtio_driver(&virtio_scsi_driver); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio SCSI HBA driver"); MODULE_LICENSE("GPL");
gpl-2.0
mrksbrd/android_kernel_lge_z
drivers/platform/msm/ipa/a2_service.c
1814
47155
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * A2 service component */ #include <net/ip.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/clk.h> #include <linux/wakelock.h> #include <mach/sps.h> #include <mach/msm_smsm.h> #include <mach/socinfo.h> #include <mach/ipa.h> #include "ipa_i.h" #define A2_NUM_PIPES 6 #define A2_SUMMING_THRESHOLD 4096 #define BUFFER_SIZE 2048 #define NUM_BUFFERS 32 #define BAM_CH_LOCAL_OPEN 0x1 #define BAM_CH_REMOTE_OPEN 0x2 #define BAM_CH_IN_RESET 0x4 #define BAM_MUX_HDR_MAGIC_NO 0x33fc #define BAM_MUX_HDR_CMD_DATA 0 #define BAM_MUX_HDR_CMD_OPEN 1 #define BAM_MUX_HDR_CMD_CLOSE 2 #define BAM_MUX_HDR_CMD_STATUS 3 #define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4 #define LOW_WATERMARK 2 #define HIGH_WATERMARK 4 #define A2_MUX_COMPLETION_TIMEOUT (60*HZ) #define ENABLE_DISCONNECT_ACK 0x1 #define A2_MUX_PADDING_LENGTH(len) (4 - ((len) & 0x3)) struct bam_ch_info { u32 status; a2_mux_notify_cb notify_cb; void *user_data; spinlock_t lock; int num_tx_pkts; int use_wm; u32 v4_hdr_hdl; u32 v6_hdr_hdl; }; struct tx_pkt_info { struct sk_buff *skb; char is_cmd; u32 len; struct list_head list_node; unsigned ts_sec; unsigned long ts_nsec; }; struct bam_mux_hdr { u16 magic_num; u8 reserved; u8 cmd; u8 pad_len; u8 ch_id; u16 pkt_len; }; struct a2_mux_context_type { u32 tethered_prod; u32 tethered_cons; u32 embedded_prod; u32 embedded_cons; int a2_mux_apps_pc_enabled; struct work_struct kickoff_ul_wakeup; struct work_struct 
kickoff_ul_power_down; struct work_struct kickoff_ul_request_resource; struct bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS]; struct list_head bam_tx_pool; spinlock_t bam_tx_pool_spinlock; struct workqueue_struct *a2_mux_tx_workqueue; struct workqueue_struct *a2_mux_rx_workqueue; int a2_mux_initialized; bool bam_is_connected; bool bam_connect_in_progress; int a2_mux_send_power_vote_on_init_once; int a2_mux_sw_bridge_is_connected; bool a2_mux_dl_wakeup; u32 a2_device_handle; struct mutex wakeup_lock; struct completion ul_wakeup_ack_completion; struct completion bam_connection_completion; struct completion request_resource_completion; struct completion dl_wakeup_completion; rwlock_t ul_wakeup_lock; int wait_for_ack; struct wake_lock bam_wakelock; int a2_pc_disabled; spinlock_t wakelock_reference_lock; int wakelock_reference_count; int a2_pc_disabled_wakelock_skipped; int disconnect_ack; struct mutex smsm_cb_lock; int bam_dmux_uplink_vote; }; static struct a2_mux_context_type *a2_mux_ctx; static void handle_a2_mux_cmd(struct sk_buff *rx_skb); static bool bam_ch_is_open(int index) { return a2_mux_ctx->bam_ch[index].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN); } static bool bam_ch_is_local_open(int index) { return a2_mux_ctx->bam_ch[index].status & BAM_CH_LOCAL_OPEN; } static bool bam_ch_is_remote_open(int index) { return a2_mux_ctx->bam_ch[index].status & BAM_CH_REMOTE_OPEN; } static bool bam_ch_is_in_reset(int index) { return a2_mux_ctx->bam_ch[index].status & BAM_CH_IN_RESET; } static void set_tx_timestamp(struct tx_pkt_info *pkt) { unsigned long long t_now; t_now = sched_clock(); pkt->ts_nsec = do_div(t_now, 1000000000U); pkt->ts_sec = (unsigned)t_now; } static void verify_tx_queue_is_empty(const char *func) { unsigned long flags; struct tx_pkt_info *info; int reported = 0; spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags); list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) { if (!reported) { IPADBG("%s: tx pool not empty\n", func); reported = 
1; } IPADBG("%s: node=%p ts=%u.%09lu\n", __func__, &info->list_node, info->ts_sec, info->ts_nsec); } spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); } static void grab_wakelock(void) { unsigned long flags; spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags); IPADBG("%s: ref count = %d\n", __func__, a2_mux_ctx->wakelock_reference_count); if (a2_mux_ctx->wakelock_reference_count == 0) wake_lock(&a2_mux_ctx->bam_wakelock); ++a2_mux_ctx->wakelock_reference_count; spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags); } static void release_wakelock(void) { unsigned long flags; spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags); if (a2_mux_ctx->wakelock_reference_count == 0) { IPAERR("%s: bam_dmux wakelock not locked\n", __func__); dump_stack(); spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags); return; } IPADBG("%s: ref count = %d\n", __func__, a2_mux_ctx->wakelock_reference_count); --a2_mux_ctx->wakelock_reference_count; if (a2_mux_ctx->wakelock_reference_count == 0) wake_unlock(&a2_mux_ctx->bam_wakelock); spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags); } static void toggle_apps_ack(void) { static unsigned int clear_bit; /* 0 = set the bit, else clear bit */ IPADBG("%s: apps ack %d->%d\n", __func__, clear_bit & 0x1, ~clear_bit & 0x1); smsm_change_state(SMSM_APPS_STATE, clear_bit & SMSM_A2_POWER_CONTROL_ACK, ~clear_bit & SMSM_A2_POWER_CONTROL_ACK); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_apps_acks); clear_bit = ~clear_bit; } static void power_vote(int vote) { IPADBG("%s: curr=%d, vote=%d\n", __func__, a2_mux_ctx->bam_dmux_uplink_vote, vote); if (a2_mux_ctx->bam_dmux_uplink_vote == vote) IPADBG("%s: warning - duplicate power vote\n", __func__); a2_mux_ctx->bam_dmux_uplink_vote = vote; if (vote) { smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_out); } else { smsm_change_state(SMSM_APPS_STATE, 
SMSM_A2_POWER_CONTROL, 0); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_out); } } static inline void ul_powerdown(void) { IPADBG("%s: powerdown\n", __func__); verify_tx_queue_is_empty(__func__); if (a2_mux_ctx->a2_pc_disabled) release_wakelock(); else { a2_mux_ctx->wait_for_ack = 1; INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion); power_vote(0); } } static void ul_wakeup(void) { int ret; mutex_lock(&a2_mux_ctx->wakeup_lock); if (a2_mux_ctx->bam_is_connected && !a2_mux_ctx->bam_connect_in_progress) { IPADBG("%s Already awake\n", __func__); mutex_unlock(&a2_mux_ctx->wakeup_lock); return; } if (a2_mux_ctx->a2_pc_disabled) { /* * don't grab the wakelock the first time because it is * already grabbed when a2 powers on */ if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped)) grab_wakelock(); else a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1; mutex_unlock(&a2_mux_ctx->wakeup_lock); return; } /* * must wait for the previous power down request to have been acked * chances are it already came in and this will just fall through * instead of waiting */ if (a2_mux_ctx->wait_for_ack) { IPADBG("%s waiting for previous ack\n", __func__); ret = wait_for_completion_timeout( &a2_mux_ctx->ul_wakeup_ack_completion, A2_MUX_COMPLETION_TIMEOUT); a2_mux_ctx->wait_for_ack = 0; if (unlikely(ret == 0)) { IPAERR("%s previous ack from modem timed out\n", __func__); goto bail; } } INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion); power_vote(1); IPADBG("%s waiting for wakeup ack\n", __func__); ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion, A2_MUX_COMPLETION_TIMEOUT); if (unlikely(ret == 0)) { IPAERR("%s wakup ack from modem timed out\n", __func__); goto bail; } INIT_COMPLETION(a2_mux_ctx->bam_connection_completion); if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) { ret = wait_for_completion_timeout( &a2_mux_ctx->bam_connection_completion, A2_MUX_COMPLETION_TIMEOUT); if (unlikely(ret == 0)) { IPAERR("%s modem power on timed out\n", __func__); goto 
bail; } } IPADBG("%s complete\n", __func__); mutex_unlock(&a2_mux_ctx->wakeup_lock); return; bail: mutex_unlock(&a2_mux_ctx->wakeup_lock); BUG(); return; } static void a2_mux_write_done(bool is_tethered, struct sk_buff *skb) { struct tx_pkt_info *info; enum a2_mux_logical_channel_id lcid; unsigned long event_data; unsigned long flags; spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags); info = list_first_entry(&a2_mux_ctx->bam_tx_pool, struct tx_pkt_info, list_node); if (unlikely(info->skb != skb)) { struct tx_pkt_info *errant_pkt; IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n", a2_mux_ctx->bam_tx_pool.next, &info->list_node, info->ts_sec, info->ts_nsec ); list_for_each_entry(errant_pkt, &a2_mux_ctx->bam_tx_pool, list_node) { IPAERR("%s: node=%p ts=%u.%09lu\n", __func__, &errant_pkt->list_node, errant_pkt->ts_sec, errant_pkt->ts_nsec); if (errant_pkt->skb == skb) info = errant_pkt; } spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); BUG(); } list_del(&info->list_node); spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); if (info->is_cmd) { dev_kfree_skb_any(info->skb); kfree(info); return; } skb = info->skb; kfree(info); event_data = (unsigned long)(skb); if (is_tethered) lcid = A2_MUX_TETHERED_0; else { struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data; lcid = (enum a2_mux_logical_channel_id) hdr->ch_id; } spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); a2_mux_ctx->bam_ch[lcid].num_tx_pkts--; spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); if (a2_mux_ctx->bam_ch[lcid].notify_cb) a2_mux_ctx->bam_ch[lcid].notify_cb( a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE, event_data); else dev_kfree_skb_any(skb); } static bool a2_mux_kickoff_ul_power_down(void) { bool is_connected; write_lock(&a2_mux_ctx->ul_wakeup_lock); if (a2_mux_ctx->bam_connect_in_progress) { a2_mux_ctx->bam_is_connected = false; is_connected = true; } else { is_connected = a2_mux_ctx->bam_is_connected; 
a2_mux_ctx->bam_is_connected = false; if (is_connected) { a2_mux_ctx->bam_connect_in_progress = true; queue_work(a2_mux_ctx->a2_mux_tx_workqueue, &a2_mux_ctx->kickoff_ul_power_down); } } write_unlock(&a2_mux_ctx->ul_wakeup_lock); return is_connected; } static bool a2_mux_kickoff_ul_wakeup(void) { bool is_connected; write_lock(&a2_mux_ctx->ul_wakeup_lock); if (a2_mux_ctx->bam_connect_in_progress) { a2_mux_ctx->bam_is_connected = true; is_connected = false; } else { is_connected = a2_mux_ctx->bam_is_connected; a2_mux_ctx->bam_is_connected = true; if (!is_connected) { a2_mux_ctx->bam_connect_in_progress = true; queue_work(a2_mux_ctx->a2_mux_tx_workqueue, &a2_mux_ctx->kickoff_ul_wakeup); } } write_unlock(&a2_mux_ctx->ul_wakeup_lock); return is_connected; } static void kickoff_ul_power_down_func(struct work_struct *work) { bool is_connected; IPADBG("%s: UL active - forcing powerdown\n", __func__); ul_powerdown(); write_lock(&a2_mux_ctx->ul_wakeup_lock); is_connected = a2_mux_ctx->bam_is_connected; a2_mux_ctx->bam_is_connected = false; a2_mux_ctx->bam_connect_in_progress = false; write_unlock(&a2_mux_ctx->ul_wakeup_lock); if (is_connected) a2_mux_kickoff_ul_wakeup(); else ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED, IPA_RM_RESOURCE_A2_CONS); } static void kickoff_ul_wakeup_func(struct work_struct *work) { bool is_connected; int ret; ul_wakeup(); write_lock(&a2_mux_ctx->ul_wakeup_lock); is_connected = a2_mux_ctx->bam_is_connected; a2_mux_ctx->bam_is_connected = true; a2_mux_ctx->bam_connect_in_progress = false; write_unlock(&a2_mux_ctx->ul_wakeup_lock); if (is_connected) ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED, IPA_RM_RESOURCE_A2_CONS); INIT_COMPLETION(a2_mux_ctx->dl_wakeup_completion); if (!a2_mux_ctx->a2_mux_dl_wakeup) { ret = wait_for_completion_timeout( &a2_mux_ctx->dl_wakeup_completion, A2_MUX_COMPLETION_TIMEOUT); if (unlikely(ret == 0)) { IPAERR("%s timeout waiting for A2 PROD granted\n", __func__); BUG(); return; } } if (!is_connected) 
a2_mux_kickoff_ul_power_down(); } static void kickoff_ul_request_resource_func(struct work_struct *work) { int ret; INIT_COMPLETION(a2_mux_ctx->request_resource_completion); ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD); if (ret < 0 && ret != -EINPROGRESS) { IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__, ret); return; } if (ret == -EINPROGRESS) { ret = wait_for_completion_timeout( &a2_mux_ctx->request_resource_completion, A2_MUX_COMPLETION_TIMEOUT); if (unlikely(ret == 0)) { IPAERR("%s timeout waiting for A2 PROD granted\n", __func__); BUG(); return; } } toggle_apps_ack(); a2_mux_ctx->a2_mux_dl_wakeup = true; complete_all(&a2_mux_ctx->dl_wakeup_completion); } static void ipa_embedded_notify(void *priv, enum ipa_dp_evt_type evt, unsigned long data) { switch (evt) { case IPA_RECEIVE: handle_a2_mux_cmd((struct sk_buff *)data); break; case IPA_WRITE_DONE: a2_mux_write_done(false, (struct sk_buff *)data); break; default: IPAERR("%s: Unknown event %d\n", __func__, evt); break; } } static void ipa_tethered_notify(void *priv, enum ipa_dp_evt_type evt, unsigned long data) { IPADBG("%s: event = %d\n", __func__, evt); switch (evt) { case IPA_RECEIVE: if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb) a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb( a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data, A2_MUX_RECEIVE, data); break; case IPA_WRITE_DONE: a2_mux_write_done(true, (struct sk_buff *)data); break; default: IPAERR("%s: Unknown event %d\n", __func__, evt); break; } } static int connect_to_bam(void) { int ret; struct ipa_sys_connect_params connect_params; IPAERR("%s:\n", __func__); if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) { IPAERR("%s: SW bridge is already UP\n", __func__); return -EFAULT; } if (sps_ctrl_bam_dma_clk(true)) WARN_ON(1); memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params)); connect_params.client = IPA_CLIENT_A2_TETHERED_CONS; connect_params.notify = ipa_tethered_notify; connect_params.desc_fifo_sz = 0x800; ret = 
ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED, &connect_params, &a2_mux_ctx->tethered_prod); if (ret) { IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n", __func__, ret); goto bridge_tethered_ul_failed; } memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params)); connect_params.ipa_ep_cfg.mode.mode = IPA_DMA; connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS; connect_params.client = IPA_CLIENT_A2_TETHERED_PROD; connect_params.notify = ipa_tethered_notify; connect_params.desc_fifo_sz = 0x800; ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED, &connect_params, &a2_mux_ctx->tethered_cons); if (ret) { IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n", __func__, ret); goto bridge_tethered_dl_failed; } memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params)); connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr); connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6; connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS; connect_params.notify = ipa_embedded_notify; connect_params.desc_fifo_sz = 0x800; ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED, &connect_params, &a2_mux_ctx->embedded_prod); if (ret) { IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n", __func__, ret); goto bridge_embedded_ul_failed; } memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params)); connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr); connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4; connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD; connect_params.notify = ipa_embedded_notify; connect_params.desc_fifo_sz = 0x800; ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED, &connect_params, &a2_mux_ctx->embedded_cons); if (ret) { IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n", __func__, ret); goto 
bridge_embedded_dl_failed; } a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1; complete_all(&a2_mux_ctx->bam_connection_completion); return 0; bridge_embedded_dl_failed: ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED, a2_mux_ctx->embedded_prod); bridge_embedded_ul_failed: ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED, a2_mux_ctx->tethered_cons); bridge_tethered_dl_failed: ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED, a2_mux_ctx->tethered_prod); bridge_tethered_ul_failed: if (sps_ctrl_bam_dma_clk(false)) WARN_ON(1); return ret; } static int disconnect_to_bam(void) { int ret; IPAERR("%s\n", __func__); if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) { IPAERR("%s: SW bridge is already DOWN\n", __func__); return -EFAULT; } ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED, a2_mux_ctx->tethered_prod); if (ret) { IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n", __func__, ret); return ret; } ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED, a2_mux_ctx->tethered_cons); if (ret) { IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n", __func__, ret); return ret; } ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED, a2_mux_ctx->embedded_prod); if (ret) { IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n", __func__, ret); return ret; } ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED, a2_mux_ctx->embedded_cons); if (ret) { IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n", __func__, ret); return ret; } if (sps_ctrl_bam_dma_clk(false)) WARN_ON(1); verify_tx_queue_is_empty(__func__); (void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD); if (a2_mux_ctx->disconnect_ack) toggle_apps_ack(); a2_mux_ctx->a2_mux_dl_wakeup = false; a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0; complete_all(&a2_mux_ctx->bam_connection_completion); return 0; } static void a2_mux_smsm_cb(void *priv, u32 old_state, u32 
new_state) { static int last_processed_state; mutex_lock(&a2_mux_ctx->smsm_cb_lock); IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state, new_state); if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) { IPADBG("%s: already processed this state\n", __func__); mutex_unlock(&a2_mux_ctx->smsm_cb_lock); return; } last_processed_state = new_state & SMSM_A2_POWER_CONTROL; if (new_state & SMSM_A2_POWER_CONTROL) { IPADBG("%s: MODEM PWR CTRL 1\n", __func__); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_in); grab_wakelock(); (void) connect_to_bam(); queue_work(a2_mux_ctx->a2_mux_rx_workqueue, &a2_mux_ctx->kickoff_ul_request_resource); } else if (!(new_state & SMSM_A2_POWER_CONTROL)) { IPADBG("%s: MODEM PWR CTRL 0\n", __func__); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_in); (void) disconnect_to_bam(); release_wakelock(); } else { IPAERR("%s: unsupported state change\n", __func__); } mutex_unlock(&a2_mux_ctx->smsm_cb_lock); } static void a2_mux_smsm_ack_cb(void *priv, u32 old_state, u32 new_state) { IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state, new_state); IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_modem_acks); complete_all(&a2_mux_ctx->ul_wakeup_ack_completion); } static int a2_mux_pm_rm_request_resource(void) { int result = 0; bool is_connected; is_connected = a2_mux_kickoff_ul_wakeup(); if (!is_connected) result = -EINPROGRESS; return result; } static int a2_mux_pm_rm_release_resource(void) { int result = 0; bool is_connected; is_connected = a2_mux_kickoff_ul_power_down(); if (is_connected) result = -EINPROGRESS; return result; } static void a2_mux_pm_rm_notify_cb(void *user_data, enum ipa_rm_event event, unsigned long data) { switch (event) { case IPA_RM_RESOURCE_GRANTED: IPADBG("%s: PROD GRANTED CB\n", __func__); complete_all(&a2_mux_ctx->request_resource_completion); break; case IPA_RM_RESOURCE_RELEASED: IPADBG("%s: PROD RELEASED CB\n", __func__); break; default: return; } } static int a2_mux_pm_initialize_rm(void) { struct 
ipa_rm_create_params create_params; int result; memset(&create_params, 0, sizeof(create_params)); create_params.name = IPA_RM_RESOURCE_A2_PROD; create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb; result = ipa_rm_create_resource(&create_params); if (result) goto bail; memset(&create_params, 0, sizeof(create_params)); create_params.name = IPA_RM_RESOURCE_A2_CONS; create_params.release_resource = &a2_mux_pm_rm_release_resource; create_params.request_resource = &a2_mux_pm_rm_request_resource; result = ipa_rm_create_resource(&create_params); bail: return result; } static void a2_mux_process_data(struct sk_buff *rx_skb) { unsigned long flags; struct bam_mux_hdr *rx_hdr; unsigned long event_data; rx_hdr = (struct bam_mux_hdr *)rx_skb->data; rx_skb->data = (unsigned char *)(rx_hdr + 1); rx_skb->tail = rx_skb->data + rx_hdr->pkt_len; rx_skb->len = rx_hdr->pkt_len; rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff); event_data = (unsigned long)(rx_skb); spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb) a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb( a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data, A2_MUX_RECEIVE, event_data); else dev_kfree_skb_any(rx_skb); spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); } static void handle_a2_mux_cmd_open(struct bam_mux_hdr *rx_hdr) { unsigned long flags; spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN; a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0; spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); } static void handle_a2_mux_cmd(struct sk_buff *rx_skb) { unsigned long flags; struct bam_mux_hdr *rx_hdr; rx_hdr = (struct bam_mux_hdr *)rx_skb->data; IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__, rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len); 
rx_hdr->magic_num = ntohs(rx_hdr->magic_num); rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len); IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n", __func__, rx_hdr->magic_num, rx_hdr->pkt_len); if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) { IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n", rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len); dev_kfree_skb_any(rx_skb); return; } if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) { IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n", rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len); dev_kfree_skb_any(rx_skb); return; } switch (rx_hdr->cmd) { case BAM_MUX_HDR_CMD_DATA: a2_mux_process_data(rx_skb); break; case BAM_MUX_HDR_CMD_OPEN: IPADBG("%s: opening cid %d PC enabled\n", __func__, rx_hdr->ch_id); handle_a2_mux_cmd_open(rx_hdr); if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) { IPADBG("%s: deactivating disconnect ack\n", __func__); a2_mux_ctx->disconnect_ack = 0; } dev_kfree_skb_any(rx_skb); if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) { kickoff_ul_wakeup_func(NULL); a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0; } break; case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC: IPADBG("%s: opening cid %d PC disabled\n", __func__, rx_hdr->ch_id); if (!a2_mux_ctx->a2_pc_disabled) { a2_mux_ctx->a2_pc_disabled = 1; ul_wakeup(); } handle_a2_mux_cmd_open(rx_hdr); dev_kfree_skb_any(rx_skb); break; case BAM_MUX_HDR_CMD_CLOSE: /* probably should drop pending write */ IPADBG("%s: closing cid %d\n", __func__, rx_hdr->ch_id); spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN; spin_unlock_irqrestore( &a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags); dev_kfree_skb_any(rx_skb); break; default: IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n", rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id, 
rx_hdr->pkt_len); dev_kfree_skb_any(rx_skb); return; } } static int a2_mux_write_cmd(void *data, u32 len) { int rc; struct tx_pkt_info *pkt; unsigned long flags; pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC); if (pkt == NULL) { IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__); return -ENOMEM; } pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN); if (pkt->skb == NULL) { IPAERR("%s: unable to alloc skb\n\n", __func__); kfree(pkt); return -ENOMEM; } memcpy(skb_put(pkt->skb, len), data, len); kfree(data); pkt->len = len; pkt->is_cmd = 1; set_tx_timestamp(pkt); spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags); list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool); rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL); if (rc) { IPAERR("%s ipa_tx_dp failed rc=%d\n", __func__, rc); list_del(&pkt->list_node); spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); dev_kfree_skb_any(pkt->skb); kfree(pkt); } else { spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); } return rc; } /** * a2_mux_get_tethered_client_handles() - provide the tethred * pipe handles for post setup configuration * @lcid: logical channel ID * @clnt_cons_handle: [out] consumer pipe handle * @clnt_prod_handle: [out] producer pipe handle * * Returns: 0 on success, negative on failure */ int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid, unsigned int *clnt_cons_handle, unsigned int *clnt_prod_handle) { if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0) return -ENODEV; if (!clnt_cons_handle || !clnt_prod_handle) return -EINVAL; *clnt_prod_handle = a2_mux_ctx->tethered_prod; *clnt_cons_handle = a2_mux_ctx->tethered_cons; return 0; } /** * a2_mux_write() - send the packet to A2, * add MUX header acc to lcid provided * @id: logical channel ID * @skb: SKB to write * * Returns: 0 on success, negative on failure */ int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb) { int rc = 0; 
struct bam_mux_hdr *hdr; unsigned long flags; struct sk_buff *new_skb = NULL; struct tx_pkt_info *pkt; bool is_connected; if (id >= A2_MUX_NUM_CHANNELS) return -EINVAL; if (!skb) return -EINVAL; if (!a2_mux_ctx->a2_mux_initialized) return -ENODEV; spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags); if (!bam_ch_is_open(id)) { spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags); IPAERR("%s: port not open: %d\n", __func__, a2_mux_ctx->bam_ch[id].status); return -ENODEV; } if (a2_mux_ctx->bam_ch[id].use_wm && (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) { spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags); IPAERR("%s: watermark exceeded: %d\n", __func__, id); return -EAGAIN; } spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags); read_lock(&a2_mux_ctx->ul_wakeup_lock); is_connected = a2_mux_ctx->bam_is_connected && !a2_mux_ctx->bam_connect_in_progress; read_unlock(&a2_mux_ctx->ul_wakeup_lock); if (!is_connected) return -ENODEV; if (id != A2_MUX_TETHERED_0) { /* * if skb do not have any tailroom for padding * copy the skb into a new expanded skb */ if ((skb->len & 0x3) && (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) { new_skb = skb_copy_expand(skb, skb_headroom(skb), A2_MUX_PADDING_LENGTH(skb->len), GFP_ATOMIC); if (new_skb == NULL) { IPAERR("%s: cannot allocate skb\n", __func__); rc = -ENOMEM; goto write_fail; } dev_kfree_skb_any(skb); skb = new_skb; } hdr = (struct bam_mux_hdr *)skb_push( skb, sizeof(struct bam_mux_hdr)); /* * caller should allocate for hdr and padding * hdr is fine, padding is tricky */ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; hdr->cmd = BAM_MUX_HDR_CMD_DATA; hdr->reserved = 0; hdr->ch_id = id; hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr); if (skb->len & 0x3) skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len)); hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len); IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n", skb->data, skb->tail, skb->len, hdr->pkt_len, hdr->pad_len); 
hdr->magic_num = htons(hdr->magic_num); hdr->pkt_len = htons(hdr->pkt_len); IPADBG("convert to network order magic_num=%d, pkt_len=%d\n", hdr->magic_num, hdr->pkt_len); } pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC); if (pkt == NULL) { IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__); rc = -ENOMEM; goto write_fail2; } pkt->skb = skb; pkt->is_cmd = 0; set_tx_timestamp(pkt); spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags); list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool); if (id == A2_MUX_TETHERED_0) rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL); else rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL); if (rc) { IPAERR("%s ipa_tx_dp failed rc=%d\n", __func__, rc); list_del(&pkt->list_node); spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); goto write_fail3; } else { spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags); spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags); a2_mux_ctx->bam_ch[id].num_tx_pkts++; spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags); } return 0; write_fail3: kfree(pkt); write_fail2: if (new_skb) dev_kfree_skb_any(new_skb); write_fail: return rc; } /** * a2_mux_add_hdr() - called when MUX header should * be added * @lcid: logical channel ID * * Returns: 0 on success, negative on failure */ static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid) { struct ipa_ioc_add_hdr *hdrs; struct ipa_hdr_add *ipv4_hdr; struct ipa_hdr_add *ipv6_hdr; struct bam_mux_hdr *dmux_hdr; int rc; IPADBG("%s: ch %d\n", __func__, lcid); if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) { IPAERR("%s: non valid lcid passed: %d\n", __func__, lcid); return -EINVAL; } hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL); if (!hdrs) { IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid); return -ENOMEM; } ipv4_hdr = &hdrs->hdr[0]; ipv6_hdr = &hdrs->hdr[1]; dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr; snprintf(ipv4_hdr->name, 
IPA_RESOURCE_NAME_MAX, "%s%d", A2_MUX_HDR_NAME_V4_PREF, lcid); dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA; dmux_hdr->reserved = 0; dmux_hdr->ch_id = lcid; /* Packet lenght is added by IPA */ dmux_hdr->pkt_len = 0; dmux_hdr->pad_len = 0; dmux_hdr->magic_num = htons(dmux_hdr->magic_num); IPADBG("converted to network order magic_num=%d\n", dmux_hdr->magic_num); ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr); ipv4_hdr->is_partial = 0; dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr; snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d", A2_MUX_HDR_NAME_V6_PREF, lcid); dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA; dmux_hdr->reserved = 0; dmux_hdr->ch_id = lcid; /* Packet lenght is added by IPA */ dmux_hdr->pkt_len = 0; dmux_hdr->pad_len = 0; dmux_hdr->magic_num = htons(dmux_hdr->magic_num); IPADBG("converted to network order magic_num=%d\n", dmux_hdr->magic_num); ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr); ipv6_hdr->is_partial = 0; hdrs->commit = 1; hdrs->num_hdrs = 2; rc = ipa_add_hdr(hdrs); if (rc) { IPAERR("Fail on Header-Insertion(%d)\n", rc); goto bail; } if (ipv4_hdr->status) { IPAERR("Fail on Header-Insertion ipv4(%d)\n", ipv4_hdr->status); rc = ipv4_hdr->status; goto bail; } if (ipv6_hdr->status) { IPAERR("%s: Fail on Header-Insertion ipv4(%d)\n", __func__, ipv6_hdr->status); rc = ipv6_hdr->status; goto bail; } a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl; a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl; rc = 0; bail: kfree(hdrs); return rc; } /** * a2_mux_del_hdr() - called when MUX header should * be removed * @lcid: logical channel ID * * Returns: 0 on success, negative on failure */ static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid) { struct ipa_ioc_del_hdr *hdrs; struct ipa_hdr_del *ipv4_hdl; struct ipa_hdr_del *ipv6_hdl; int rc; IPADBG("%s: ch %d\n", __func__, lcid); if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) { IPAERR("invalid lcid passed: 
%d\n", lcid); return -EINVAL; } hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del), GFP_KERNEL); if (!hdrs) { IPAERR("hdr alloc fail for ch %d\n", lcid); return -ENOMEM; } ipv4_hdl = &hdrs->hdl[0]; ipv6_hdl = &hdrs->hdl[1]; ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl; ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl; hdrs->commit = 1; hdrs->num_hdls = 2; rc = ipa_del_hdr(hdrs); if (rc) { IPAERR("Fail on Del Header-Insertion(%d)\n", rc); goto bail; } if (ipv4_hdl->status) { IPAERR("Fail on Del Header-Insertion ipv4(%d)\n", ipv4_hdl->status); rc = ipv4_hdl->status; goto bail; } a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0; if (ipv6_hdl->status) { IPAERR("Fail on Del Header-Insertion ipv4(%d)\n", ipv6_hdl->status); rc = ipv6_hdl->status; goto bail; } a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0; rc = 0; bail: kfree(hdrs); return rc; } /** * a2_mux_open_channel() - opens logical channel * to A2 * @lcid: logical channel ID * @user_data: user provided data for below CB * @notify_cb: user provided notification CB * * Returns: 0 on success, negative on failure */ int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid, void *user_data, a2_mux_notify_cb notify_cb) { struct bam_mux_hdr *hdr; unsigned long flags; int rc = 0; bool is_connected; IPADBG("%s: opening ch %d\n", __func__, lcid); if (!a2_mux_ctx->a2_mux_initialized) { IPAERR("%s: not inititialized\n", __func__); return -ENODEV; } if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) { IPAERR("%s: invalid channel id %d\n", __func__, lcid); return -EINVAL; } if (notify_cb == NULL) { IPAERR("%s: notify function is NULL\n", __func__); return -EINVAL; } spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); if (bam_ch_is_open(lcid)) { IPAERR("%s: Already opened %d\n", __func__, lcid); spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); goto open_done; } if (!bam_ch_is_remote_open(lcid)) { IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid); 
spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); return -ENODEV; } a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb; a2_mux_ctx->bam_ch[lcid].user_data = user_data; a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN; a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0; a2_mux_ctx->bam_ch[lcid].use_wm = 0; spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); read_lock(&a2_mux_ctx->ul_wakeup_lock); is_connected = a2_mux_ctx->bam_is_connected && !a2_mux_ctx->bam_connect_in_progress; read_unlock(&a2_mux_ctx->ul_wakeup_lock); if (!is_connected) return -ENODEV; if (lcid != A2_MUX_TETHERED_0) { hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL); if (hdr == NULL) { IPAERR("%s: hdr kmalloc failed. ch: %d\n", __func__, lcid); return -ENOMEM; } hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; if (a2_mux_ctx->a2_mux_apps_pc_enabled) { hdr->cmd = BAM_MUX_HDR_CMD_OPEN; } else { IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n", __func__); a2_mux_ctx->a2_pc_disabled = 1; hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC; } hdr->reserved = 0; hdr->ch_id = lcid; hdr->pkt_len = 0; hdr->pad_len = 0; hdr->magic_num = htons(hdr->magic_num); hdr->pkt_len = htons(hdr->pkt_len); IPADBG("convert to network order magic_num=%d, pkt_len=%d\n", hdr->magic_num, hdr->pkt_len); rc = a2_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr)); if (rc) { IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n", __func__, rc, lcid); kfree(hdr); return rc; } rc = a2_mux_add_hdr(lcid); if (rc) { IPAERR("a2_mux_add_hdr failed %d; ch: %d\n", rc, lcid); return rc; } } open_done: IPADBG("%s: opened ch %d\n", __func__, lcid); return rc; } /** * a2_mux_close_channel() - closes logical channel * to A2 * @lcid: logical channel ID * * Returns: 0 on success, negative on failure */ int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid) { struct bam_mux_hdr *hdr; unsigned long flags; int rc = 0; bool is_connected; if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) return -EINVAL; IPADBG("%s: closing ch %d\n", __func__, 
lcid); if (!a2_mux_ctx->a2_mux_initialized) return -ENODEV; read_lock(&a2_mux_ctx->ul_wakeup_lock); is_connected = a2_mux_ctx->bam_is_connected && !a2_mux_ctx->bam_connect_in_progress; read_unlock(&a2_mux_ctx->ul_wakeup_lock); if (!is_connected && !bam_ch_is_in_reset(lcid)) return -ENODEV; spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); a2_mux_ctx->bam_ch[lcid].notify_cb = NULL; a2_mux_ctx->bam_ch[lcid].user_data = NULL; a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN; spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); if (bam_ch_is_in_reset(lcid)) { a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET; return 0; } if (lcid != A2_MUX_TETHERED_0) { hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC); if (hdr == NULL) { IPAERR("%s: hdr kmalloc failed. ch: %d\n", __func__, lcid); return -ENOMEM; } hdr->magic_num = BAM_MUX_HDR_MAGIC_NO; hdr->cmd = BAM_MUX_HDR_CMD_CLOSE; hdr->reserved = 0; hdr->ch_id = lcid; hdr->pkt_len = 0; hdr->pad_len = 0; hdr->magic_num = htons(hdr->magic_num); hdr->pkt_len = htons(hdr->pkt_len); IPADBG("convert to network order magic_num=%d, pkt_len=%d\n", hdr->magic_num, hdr->pkt_len); rc = a2_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr)); if (rc) { IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n", __func__, rc, lcid); kfree(hdr); return rc; } rc = a2_mux_del_hdr(lcid); if (rc) { IPAERR("a2_mux_del_hdr failed %d; ch: %d\n", rc, lcid); return rc; } } IPADBG("%s: closed ch %d\n", __func__, lcid); return 0; } /** * a2_mux_is_ch_full() - checks if channel is above predefined WM, * used for flow control implementation * @lcid: logical channel ID * * Returns: true if the channel is above predefined WM, * false otherwise */ int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid) { unsigned long flags; int ret; if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) return -EINVAL; if (!a2_mux_ctx->a2_mux_initialized) return -ENODEV; spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); a2_mux_ctx->bam_ch[lcid].use_wm 
= 1; ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK; IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__, lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret); if (!bam_ch_is_local_open(lcid)) { ret = -ENODEV; IPAERR("%s: port not open: %d\n", __func__, a2_mux_ctx->bam_ch[lcid].status); } spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); return ret; } /** * a2_mux_is_ch_low() - checks if channel is below predefined WM, * used for flow control implementation * @lcid: logical channel ID * * Returns: true if the channel is below predefined WM, * false otherwise */ int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid) { unsigned long flags; int ret; if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) return -EINVAL; if (!a2_mux_ctx->a2_mux_initialized) return -ENODEV; spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); a2_mux_ctx->bam_ch[lcid].use_wm = 1; ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK; IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__, lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret); if (!bam_ch_is_local_open(lcid)) { ret = -ENODEV; IPAERR("%s: port not open: %d\n", __func__, a2_mux_ctx->bam_ch[lcid].status); } spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); return ret; } /** * a2_mux_is_ch_empty() - checks if channel is empty. 
* @lcid: logical channel ID * * Returns: true if the channel is empty, * false otherwise */ int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid) { unsigned long flags; int ret; if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) return -EINVAL; if (!a2_mux_ctx->a2_mux_initialized) return -ENODEV; spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags); a2_mux_ctx->bam_ch[lcid].use_wm = 1; ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts == 0; if (!bam_ch_is_local_open(lcid)) { ret = -ENODEV; IPAERR("%s: port not open: %d\n", __func__, a2_mux_ctx->bam_ch[lcid].status); } spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags); return ret; } static int a2_mux_initialize_context(int handle) { int i; a2_mux_ctx->a2_mux_apps_pc_enabled = 1; a2_mux_ctx->a2_device_handle = handle; INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func); INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down, kickoff_ul_power_down_func); INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource, kickoff_ul_request_resource_func); INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool); spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock); mutex_init(&a2_mux_ctx->wakeup_lock); rwlock_init(&a2_mux_ctx->ul_wakeup_lock); spin_lock_init(&a2_mux_ctx->wakelock_reference_lock); a2_mux_ctx->disconnect_ack = 1; mutex_init(&a2_mux_ctx->smsm_cb_lock); for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i) spin_lock_init(&a2_mux_ctx->bam_ch[i].lock); init_completion(&a2_mux_ctx->ul_wakeup_ack_completion); init_completion(&a2_mux_ctx->bam_connection_completion); init_completion(&a2_mux_ctx->request_resource_completion); init_completion(&a2_mux_ctx->dl_wakeup_completion); wake_lock_init(&a2_mux_ctx->bam_wakelock, WAKE_LOCK_SUSPEND, "a2_mux_wakelock"); a2_mux_ctx->a2_mux_initialized = 1; a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1; a2_mux_ctx->a2_mux_tx_workqueue = create_singlethread_workqueue("a2_mux_tx"); if (!a2_mux_ctx->a2_mux_tx_workqueue) { IPAERR("%s: a2_mux_tx_workqueue alloc failed\n", __func__); return -ENOMEM; } 
a2_mux_ctx->a2_mux_rx_workqueue = create_singlethread_workqueue("a2_mux_rx"); if (!a2_mux_ctx->a2_mux_rx_workqueue) { IPAERR("%s: a2_mux_rx_workqueue alloc failed\n", __func__); return -ENOMEM; } return 0; } /** * a2_mux_init() - initialize A2 MUX component * * Returns: 0 on success, negative otherwise */ int a2_mux_init(void) { int rc; u32 h; void *a2_virt_addr; u32 a2_bam_mem_base; u32 a2_bam_mem_size; u32 a2_bam_irq; struct sps_bam_props a2_props; IPADBG("%s A2 MUX\n", __func__); rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base, &a2_bam_mem_size, &a2_bam_irq); if (rc) { IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__); rc = -EFAULT; goto bail; } a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base), a2_bam_mem_size); if (!a2_virt_addr) { IPAERR("%s: ioremap failed\n", __func__); rc = -ENOMEM; goto bail; } memset(&a2_props, 0, sizeof(a2_props)); a2_props.phys_addr = a2_bam_mem_base; a2_props.virt_addr = a2_virt_addr; a2_props.virt_size = a2_bam_mem_size; a2_props.irq = a2_bam_irq; a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP; a2_props.num_pipes = A2_NUM_PIPES; a2_props.summing_threshold = A2_SUMMING_THRESHOLD; a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; /* need to free on tear down */ rc = sps_register_bam_device(&a2_props, &h); if (rc < 0) { IPAERR("%s: register bam error %d\n", __func__, rc); goto register_bam_failed; } a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL); if (!a2_mux_ctx) { IPAERR("%s: a2_mux_ctx alloc failed, rc: %d\n", __func__, rc); rc = -ENOMEM; goto register_bam_failed; } rc = a2_mux_initialize_context(h); if (rc) { IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n", __func__, rc); goto ctx_alloc_failed; } rc = a2_mux_pm_initialize_rm(); if (rc) { IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n", __func__, rc); goto ctx_alloc_failed; } rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL, a2_mux_smsm_cb, NULL); if (rc) { IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc); rc = -ENOMEM; 
goto ctx_alloc_failed; } rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK, a2_mux_smsm_ack_cb, NULL); if (rc) { IPAERR("%s: smsm ack cb register failed, rc: %d\n", __func__, rc); rc = -ENOMEM; goto smsm_ack_cb_reg_failed; } if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL) a2_mux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE)); /* * Set remote channel open for tethered channel since there is * no actual remote tethered channel */ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].status |= BAM_CH_REMOTE_OPEN; rc = 0; goto bail; smsm_ack_cb_reg_failed: smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL, a2_mux_smsm_cb, NULL); ctx_alloc_failed: kfree(a2_mux_ctx); register_bam_failed: iounmap(a2_virt_addr); bail: return rc; } /** * a2_mux_exit() - destroy A2 MUX component * * Returns: 0 on success, negative otherwise */ int a2_mux_exit(void) { smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK, a2_mux_smsm_ack_cb, NULL); smsm_state_cb_deregister(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL, a2_mux_smsm_cb, NULL); if (a2_mux_ctx->a2_mux_tx_workqueue) destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue); if (a2_mux_ctx->a2_mux_rx_workqueue) destroy_workqueue(a2_mux_ctx->a2_mux_rx_workqueue); return 0; }
gpl-2.0
tempesta-tech/linux-4.8.15-tfw
drivers/mfd/pcf50633-adc.c
2070
6009
/* NXP PCF50633 ADC Driver
 *
 * (C) 2006-2008 by Openmoko, Inc.
 * Author: Balaji Rao <balajirrao@openmoko.org>
 * All rights reserved.
 *
 * Broken down from monstrous PCF50633 driver mainly by
 * Harald Welte, Andy Green and Werner Almesberger
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * NOTE: This driver does not yet support subtractive ADC mode, which means
 * you can do only one measurement per read request.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/completion.h>

#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/adc.h>

/* One queued conversion: channel mux, averaging mode, and the callback
 * (with opaque parameter) invoked from the ADCRDY interrupt handler
 * once the result is available. */
struct pcf50633_adc_request {
	int mux;
	int avg;
	void (*callback)(struct pcf50633 *, void *, int);
	void *callback_param;
};

/* Bridge used by the synchronous API: the async callback stores the
 * result here and signals the completion the sync caller waits on. */
struct pcf50633_adc_sync_request {
	int result;
	struct completion completion;
};

/* Queue capacity; must stay a power of two — head/tail advance with
 * "& (PCF50633_MAX_ADC_FIFO_DEPTH - 1)" below. */
#define PCF50633_MAX_ADC_FIFO_DEPTH 8

struct pcf50633_adc {
	struct pcf50633 *pcf;

	/* Private stuff */
	/* Ring buffer of pending requests: queue_head is the request
	 * currently in flight (if any), queue_tail the next free slot.
	 * All queue fields are protected by queue_mutex. */
	struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
	int queue_head;
	int queue_tail;
	struct mutex queue_mutex;
};

/* Fetch this driver's state from the parent pcf50633's adc platform
 * device drvdata (set in probe). */
static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
{
	return platform_get_drvdata(pcf->adc_pdev);
}

/* Program the ADC registers and kick off a single 10-bit conversion on
 * the given channel with the given averaging mode. */
static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
{
	channel &= PCF50633_ADCC1_ADCMUX_MASK;

	/* kill ratiometric, but enable ACCSW biasing */
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);

	/* start ADC conversion on selected channel */
	pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
		    PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
}

/* Start the conversion at the head of the queue, if one is pending.
 * Caller must hold queue_mutex (called from enqueue and the IRQ
 * handler, both with the mutex held). */
static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
{
	struct pcf50633_adc *adc = __to_adc(pcf);
	int head;

	head = adc->queue_head;

	if (!adc->queue[head])
		return;

	adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
}

/* Append a request to the ring buffer; if the queue was idle (head ==
 * tail means nothing in flight), start the hardware immediately.
 *
 * Returns 0 on success or -EBUSY if the queue is full.  Ownership of
 * @req passes to the queue; it is freed in the IRQ handler. */
static int
adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
{
	struct pcf50633_adc *adc = __to_adc(pcf);
	int head, tail;

	mutex_lock(&adc->queue_mutex);

	head = adc->queue_head;
	tail = adc->queue_tail;

	if (adc->queue[tail]) {
		mutex_unlock(&adc->queue_mutex);
		dev_err(pcf->dev, "ADC queue is full, dropping request\n");
		return -EBUSY;
	}

	adc->queue[tail] = req;
	if (head == tail)
		trigger_next_adc_job_if_any(pcf);
	adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);

	mutex_unlock(&adc->queue_mutex);

	return 0;
}

/* Async callback used by pcf50633_adc_sync_read(): record the result
 * and wake the waiting caller. */
static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
	int result)
{
	struct pcf50633_adc_sync_request *req = param;

	req->result = result;
	complete(&req->completion);
}

/* Blocking single conversion: enqueue an async read and sleep until
 * the interrupt handler delivers the result.
 *
 * Returns the raw ADC value, or a negative errno if the request could
 * not be queued.  Must not be called from atomic context (sleeps). */
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
	struct pcf50633_adc_sync_request req;
	int ret;

	init_completion(&req.completion);

	ret = pcf50633_adc_async_read(pcf, mux, avg,
		pcf50633_adc_sync_read_callback, &req);
	if (ret)
		return ret;

	wait_for_completion(&req.completion);

	return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);

/* Non-blocking conversion: allocate a request and queue it; @callback
 * is invoked with @callback_param and the result when the conversion
 * completes.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if
 * the queue is full. */
int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
			     void (*callback)(struct pcf50633 *, void *, int),
			     void *callback_param)
{
	struct pcf50633_adc_request *req;

	/* req is freed when the result is ready, in interrupt handler */
	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->mux = mux;
	req->avg = avg;
	req->callback = callback;
	req->callback_param = callback_param;

	return adc_enqueue_request(pcf, req);
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);

/* Read the completed conversion: 8 MSBs from ADCS1 combined with the
 * low bits from ADCS3 into the 10-bit result. */
static int adc_result(struct pcf50633 *pcf)
{
	u8 adcs1, adcs3;
	u16 result;

	adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
	adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
	result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK);

	dev_dbg(pcf->dev, "adc result = %d\n", result);

	return result;
}

/* ADCRDY interrupt handler: pop the head request, read the result,
 * start the next queued conversion, then run the callback.  The
 * callback is invoked and the request freed only after queue_mutex is
 * released, so callbacks may themselves enqueue new requests. */
static void pcf50633_adc_irq(int irq, void *data)
{
	struct pcf50633_adc *adc = data;
	struct pcf50633 *pcf = adc->pcf;
	struct pcf50633_adc_request *req;
	int head, res;

	mutex_lock(&adc->queue_mutex);
	head = adc->queue_head;

	req = adc->queue[head];
	if (WARN_ON(!req)) {
		dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
		mutex_unlock(&adc->queue_mutex);
		return;
	}
	adc->queue[head] = NULL;
	adc->queue_head = (head + 1) &
		(PCF50633_MAX_ADC_FIFO_DEPTH - 1);

	res = adc_result(pcf);
	trigger_next_adc_job_if_any(pcf);

	mutex_unlock(&adc->queue_mutex);

	req->callback(pcf, req->callback_param, res);
	kfree(req);
}

/* Bind to the parent pcf50633 MFD cell and register for the ADCRDY
 * interrupt.  State is devm-allocated, so remove() needs no free. */
static int pcf50633_adc_probe(struct platform_device *pdev)
{
	struct pcf50633_adc *adc;

	adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
	if (!adc)
		return -ENOMEM;

	adc->pcf = dev_to_pcf50633(pdev->dev.parent);
	platform_set_drvdata(pdev, adc);

	pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
					pcf50633_adc_irq, adc);

	mutex_init(&adc->queue_mutex);

	return 0;
}

/* Unregister the IRQ and discard any still-queued requests.  Note that
 * pending requests are freed without their callbacks being invoked. */
static int pcf50633_adc_remove(struct platform_device *pdev)
{
	struct pcf50633_adc *adc = platform_get_drvdata(pdev);
	int i, head;

	pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);

	mutex_lock(&adc->queue_mutex);
	head = adc->queue_head;

	if (WARN_ON(adc->queue[head]))
		dev_err(adc->pcf->dev,
			"adc driver removed with request pending\n");

	for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
		kfree(adc->queue[i]);

	mutex_unlock(&adc->queue_mutex);

	return 0;
}

static struct platform_driver pcf50633_adc_driver = {
	.driver = {
		.name = "pcf50633-adc",
	},
	.probe = pcf50633_adc_probe,
	.remove = pcf50633_adc_remove,
};

module_platform_driver(pcf50633_adc_driver);

MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-adc");
gpl-2.0
oliliango/linux-cedarview_gfx
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
2838
69897
/* * PS3 gelic network driver. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/if_arp.h> #include <linux/ctype.h> #include <linux/string.h> #include <net/iw_handler.h> #include <linux/dma-mapping.h> #include <net/checksum.h> #include <asm/firmware.h> #include <asm/ps3.h> #include <asm/lv1call.h> #include "ps3_gelic_net.h" #include "ps3_gelic_wireless.h" static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan, u8 *essid, size_t essid_len); static int gelic_wl_try_associate(struct net_device *netdev); /* * tables */ /* 802.11b/g channel to freq in MHz */ static const int channel_freq[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; #define NUM_CHANNELS ARRAY_SIZE(channel_freq) /* in bps */ static const int bitrate_list[] = { 1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000 }; #define NUM_BITRATES ARRAY_SIZE(bitrate_list) /* * wpa2 support requires the hypervisor version 2.0 or later 
*/ static inline int wpa2_capable(void) { return 0 <= ps3_compare_firmware_version(2, 0, 0); } static inline int precise_ie(void) { return 0 <= ps3_compare_firmware_version(2, 2, 0); } /* * post_eurus_cmd helpers */ struct eurus_cmd_arg_info { int pre_arg; /* command requires arg1, arg2 at POST COMMAND */ int post_arg; /* command requires arg1, arg2 at GET_RESULT */ }; static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = { [GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1}, [GELIC_EURUS_CMD_SET_WEP_CFG] = { .pre_arg = 1}, [GELIC_EURUS_CMD_SET_WPA_CFG] = { .pre_arg = 1}, [GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1}, [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1}, [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1}, [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1}, [GELIC_EURUS_CMD_START_SCAN] = { .pre_arg = 1}, [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1}, }; #ifdef DEBUG static const char *cmdstr(enum gelic_eurus_command ix) { switch (ix) { case GELIC_EURUS_CMD_ASSOC: return "ASSOC"; case GELIC_EURUS_CMD_DISASSOC: return "DISASSOC"; case GELIC_EURUS_CMD_START_SCAN: return "SCAN"; case GELIC_EURUS_CMD_GET_SCAN: return "GET SCAN"; case GELIC_EURUS_CMD_SET_COMMON_CFG: return "SET_COMMON_CFG"; case GELIC_EURUS_CMD_GET_COMMON_CFG: return "GET_COMMON_CFG"; case GELIC_EURUS_CMD_SET_WEP_CFG: return "SET_WEP_CFG"; case GELIC_EURUS_CMD_GET_WEP_CFG: return "GET_WEP_CFG"; case GELIC_EURUS_CMD_SET_WPA_CFG: return "SET_WPA_CFG"; case GELIC_EURUS_CMD_GET_WPA_CFG: return "GET_WPA_CFG"; case GELIC_EURUS_CMD_GET_RSSI_CFG: return "GET_RSSI"; default: break; } return ""; }; #else static inline const char *cmdstr(enum gelic_eurus_command ix) { return ""; } #endif /* synchronously do eurus commands */ static void gelic_eurus_sync_cmd_worker(struct work_struct *work) { struct gelic_eurus_cmd *cmd; struct gelic_card *card; struct gelic_wl_info *wl; u64 arg1, arg2; pr_debug("%s: <-\n", __func__); cmd = container_of(work, struct gelic_eurus_cmd, work); 
BUG_ON(cmd_info[cmd->cmd].pre_arg && cmd_info[cmd->cmd].post_arg); wl = cmd->wl; card = port_to_card(wl_port(wl)); if (cmd_info[cmd->cmd].pre_arg) { arg1 = (cmd->buffer) ? ps3_mm_phys_to_lpar(__pa(cmd->buffer)) : 0; arg2 = cmd->buf_size; } else { arg1 = 0; arg2 = 0; } init_completion(&wl->cmd_done_intr); pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd)); cmd->status = lv1_net_control(bus_id(card), dev_id(card), GELIC_LV1_POST_WLAN_CMD, cmd->cmd, arg1, arg2, &cmd->tag, &cmd->size); if (cmd->status) { complete(&cmd->done); pr_info("%s: cmd issue failed\n", __func__); return; } wait_for_completion(&wl->cmd_done_intr); if (cmd_info[cmd->cmd].post_arg) { arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer)); arg2 = cmd->buf_size; } else { arg1 = 0; arg2 = 0; } cmd->status = lv1_net_control(bus_id(card), dev_id(card), GELIC_LV1_GET_WLAN_CMD_RESULT, cmd->tag, arg1, arg2, &cmd->cmd_status, &cmd->size); #ifdef DEBUG if (cmd->status || cmd->cmd_status) { pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__, cmd->tag, arg1, arg2); pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n", __func__, cmd->status, cmd->cmd_status, cmd->size); } #endif complete(&cmd->done); pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd)); } static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl, unsigned int eurus_cmd, void *buffer, unsigned int buf_size) { struct gelic_eurus_cmd *cmd; /* allocate cmd */ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return NULL; /* initialize members */ cmd->cmd = eurus_cmd; cmd->buffer = buffer; cmd->buf_size = buf_size; cmd->wl = wl; INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker); init_completion(&cmd->done); queue_work(wl->eurus_cmd_queue, &cmd->work); /* wait for command completion */ wait_for_completion(&cmd->done); return cmd; } static u32 gelic_wl_get_link(struct net_device *netdev) { struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); u32 ret; pr_debug("%s: <-\n", __func__); 
mutex_lock(&wl->assoc_stat_lock); if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) ret = 1; else ret = 0; mutex_unlock(&wl->assoc_stat_lock); pr_debug("%s: ->\n", __func__); return ret; } static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid) { union iwreq_data data; memset(&data, 0, sizeof(data)); if (bssid) memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN); data.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP, &data, NULL); } /* * wireless extension handlers and helpers */ /* SIOGIWNAME */ static int gelic_wl_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *iwreq, char *extra) { strcpy(iwreq->name, "IEEE 802.11bg"); return 0; } static void gelic_wl_get_ch_info(struct gelic_wl_info *wl) { struct gelic_card *card = port_to_card(wl_port(wl)); u64 ch_info_raw, tmp; int status; if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) { status = lv1_net_control(bus_id(card), dev_id(card), GELIC_LV1_GET_CHANNEL, 0, 0, 0, &ch_info_raw, &tmp); /* some fw versions may return error */ if (status) { if (status != LV1_NO_ENTRY) pr_info("%s: available ch unknown\n", __func__); wl->ch_info = 0x07ff;/* 11 ch */ } else /* 16 bits of MSB has available channels */ wl->ch_info = ch_info_raw >> 48; } } /* SIOGIWRANGE */ static int gelic_wl_get_range(struct net_device *netdev, struct iw_request_info *info, union iwreq_data *iwreq, char *extra) { struct iw_point *point = &iwreq->data; struct iw_range *range = (struct iw_range *)extra; struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); unsigned int i, chs; pr_debug("%s: <-\n", __func__); point->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 22; /* available channels and frequencies */ gelic_wl_get_ch_info(wl); for (i = 0, chs = 0; i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++) if (wl->ch_info & (1 << i)) { range->freq[chs].i = i + 1; 
range->freq[chs].m = channel_freq[i]; range->freq[chs].e = 6; chs++; } range->num_frequency = chs; range->old_num_frequency = chs; range->num_channels = chs; range->old_num_channels = chs; /* bitrates */ for (i = 0; i < NUM_BITRATES; i++) range->bitrate[i] = bitrate_list[i]; range->num_bitrates = i; /* signal levels */ range->max_qual.qual = 100; /* relative value */ range->max_qual.level = 100; range->avg_qual.qual = 50; range->avg_qual.level = 50; range->sensitivity = 0; /* Event capability */ IW_EVENT_CAPA_SET_KERNEL(range->event_capa); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); /* encryption capability */ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP | IW_ENC_CAPA_4WAY_HANDSHAKE; if (wpa2_capable()) range->enc_capa |= IW_ENC_CAPA_WPA2; range->encoding_size[0] = 5; /* 40bit WEP */ range->encoding_size[1] = 13; /* 104bit WEP */ range->encoding_size[2] = 32; /* WPA-PSK */ range->num_encoding_sizes = 3; range->max_encoding_tokens = GELIC_WEP_KEYS; /* scan capability */ range->scan_capa = IW_SCAN_CAPA_ESSID; pr_debug("%s: ->\n", __func__); return 0; } /* SIOC{G,S}IWSCAN */ static int gelic_wl_set_scan(struct net_device *netdev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); struct iw_scan_req *req; u8 *essid = NULL; size_t essid_len = 0; if (wrqu->data.length == sizeof(struct iw_scan_req) && wrqu->data.flags & IW_SCAN_THIS_ESSID) { req = (struct iw_scan_req*)extra; essid = req->essid; essid_len = req->essid_len; pr_debug("%s: ESSID scan =%s\n", __func__, essid); } return gelic_wl_start_scan(wl, 1, essid, essid_len); } #define OUI_LEN 3 static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac }; static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 }; /* * synthesize WPA/RSN IE data * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25 * for the format */ static size_t 
gelic_wl_synthesize_ie(u8 *buf, struct gelic_eurus_scan_info *scan) { const u8 *oui_header; u8 *start = buf; int rsn; int ccmp; pr_debug("%s: <- sec=%16x\n", __func__, scan->security); switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) { case GELIC_EURUS_SCAN_SEC_WPA: rsn = 0; break; case GELIC_EURUS_SCAN_SEC_WPA2: rsn = 1; break; default: /* WEP or none. No IE returned */ return 0; } switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) { case GELIC_EURUS_SCAN_SEC_WPA_TKIP: ccmp = 0; break; case GELIC_EURUS_SCAN_SEC_WPA_AES: ccmp = 1; break; default: if (rsn) { ccmp = 1; pr_info("%s: no cipher info. defaulted to CCMP\n", __func__); } else { ccmp = 0; pr_info("%s: no cipher info. defaulted to TKIP\n", __func__); } } if (rsn) oui_header = rsn_oui; else oui_header = wpa_oui; /* element id */ if (rsn) *buf++ = WLAN_EID_RSN; else *buf++ = WLAN_EID_VENDOR_SPECIFIC; /* length filed; set later */ buf++; /* wpa special header */ if (!rsn) { memcpy(buf, wpa_oui, OUI_LEN); buf += OUI_LEN; *buf++ = 0x01; } /* version */ *buf++ = 0x01; /* version 1.0 */ *buf++ = 0x00; /* group cipher */ memcpy(buf, oui_header, OUI_LEN); buf += OUI_LEN; if (ccmp) *buf++ = 0x04; /* CCMP */ else *buf++ = 0x02; /* TKIP */ /* pairwise key count always 1 */ *buf++ = 0x01; *buf++ = 0x00; /* pairwise key suit */ memcpy(buf, oui_header, OUI_LEN); buf += OUI_LEN; if (ccmp) *buf++ = 0x04; /* CCMP */ else *buf++ = 0x02; /* TKIP */ /* AKM count is 1 */ *buf++ = 0x01; *buf++ = 0x00; /* AKM suite is assumed as PSK*/ memcpy(buf, oui_header, OUI_LEN); buf += OUI_LEN; *buf++ = 0x02; /* PSK */ /* RSN capabilities is 0 */ *buf++ = 0x00; *buf++ = 0x00; /* set length field */ start[1] = (buf - start - 2); pr_debug("%s: ->\n", __func__); return buf - start; } struct ie_item { u8 *data; u8 len; }; struct ie_info { struct ie_item wpa; struct ie_item rsn; }; static void gelic_wl_parse_ie(u8 *data, size_t len, struct ie_info *ie_info) { size_t data_left = len; u8 *pos = data; u8 item_len; u8 
item_id; pr_debug("%s: data=%p len=%ld\n", __func__, data, len); memset(ie_info, 0, sizeof(struct ie_info)); while (2 <= data_left) { item_id = *pos++; item_len = *pos++; data_left -= 2; if (data_left < item_len) break; switch (item_id) { case WLAN_EID_VENDOR_SPECIFIC: if ((OUI_LEN + 1 <= item_len) && !memcmp(pos, wpa_oui, OUI_LEN) && pos[OUI_LEN] == 0x01) { ie_info->wpa.data = pos - 2; ie_info->wpa.len = item_len + 2; } break; case WLAN_EID_RSN: ie_info->rsn.data = pos - 2; /* length includes the header */ ie_info->rsn.len = item_len + 2; break; default: pr_debug("%s: ignore %#x,%d\n", __func__, item_id, item_len); break; } pos += item_len; data_left -= item_len; } pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__, ie_info->wpa.data, ie_info->wpa.len, ie_info->rsn.data, ie_info->rsn.len); } /* * translate the scan informations from hypervisor to a * independent format */ static char *gelic_wl_translate_scan(struct net_device *netdev, struct iw_request_info *info, char *ev, char *stop, struct gelic_wl_scan_info *network) { struct iw_event iwe; struct gelic_eurus_scan_info *scan = network->hwinfo; char *tmp; u8 rate; unsigned int i, j, len; u8 buf[64]; /* arbitrary size large enough */ pr_debug("%s: <-\n", __func__); /* first entry should be AP's mac address */ iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN); ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_ADDR_LEN); /* ESSID */ iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; iwe.u.data.length = strnlen(scan->essid, 32); ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid); /* FREQUENCY */ iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = be16_to_cpu(scan->channel); iwe.u.freq.e = 0; /* table value in MHz */ iwe.u.freq.i = 0; ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_FREQ_LEN); /* RATES */ iwe.cmd = SIOCGIWRATE; iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; /* to stuff multiple values in one event */ tmp = ev + 
iwe_stream_lcp_len(info); /* put them in ascendant order (older is first) */ i = 0; j = 0; pr_debug("%s: rates=%d rate=%d\n", __func__, network->rate_len, network->rate_ext_len); while (i < network->rate_len) { if (j < network->rate_ext_len && ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f))) rate = scan->ext_rate[j++] & 0x7f; else rate = scan->rate[i++] & 0x7f; iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */ tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe, IW_EV_PARAM_LEN); } while (j < network->rate_ext_len) { iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000; tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe, IW_EV_PARAM_LEN); } /* Check if we added any rate */ if (iwe_stream_lcp_len(info) < (tmp - ev)) ev = tmp; /* ENCODE */ iwe.cmd = SIOCGIWENCODE; if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid); /* MODE */ iwe.cmd = SIOCGIWMODE; if (be16_to_cpu(scan->capability) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_UINT_LEN); } /* QUAL */ iwe.cmd = IWEVQUAL; iwe.u.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; iwe.u.qual.level = be16_to_cpu(scan->rssi); iwe.u.qual.qual = be16_to_cpu(scan->rssi); iwe.u.qual.noise = 0; ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_QUAL_LEN); /* RSN */ memset(&iwe, 0, sizeof(iwe)); if (be16_to_cpu(scan->size) <= sizeof(*scan)) { /* If wpa[2] capable station, synthesize IE and put it */ len = gelic_wl_synthesize_ie(buf, scan); if (len) { iwe.cmd = IWEVGENIE; iwe.u.data.length = len; ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); } } else { /* this scan info has IE data */ struct ie_info 
ie_info; size_t data_len; data_len = be16_to_cpu(scan->size) - sizeof(*scan); gelic_wl_parse_ie(scan->elements, data_len, &ie_info); if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) { memcpy(buf, ie_info.wpa.data, ie_info.wpa.len); iwe.cmd = IWEVGENIE; iwe.u.data.length = ie_info.wpa.len; ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); } if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) { memset(&iwe, 0, sizeof(iwe)); memcpy(buf, ie_info.rsn.data, ie_info.rsn.len); iwe.cmd = IWEVGENIE; iwe.u.data.length = ie_info.rsn.len; ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); } } pr_debug("%s: ->\n", __func__); return ev; } static int gelic_wl_get_scan(struct net_device *netdev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); struct gelic_wl_scan_info *scan_info; char *ev = extra; char *stop = ev + wrqu->data.length; int ret = 0; unsigned long this_time = jiffies; pr_debug("%s: <-\n", __func__); if (mutex_lock_interruptible(&wl->scan_lock)) return -EAGAIN; switch (wl->scan_stat) { case GELIC_WL_SCAN_STAT_SCANNING: /* If a scan in progress, caller should call me again */ ret = -EAGAIN; goto out; break; case GELIC_WL_SCAN_STAT_INIT: /* last scan request failed or never issued */ ret = -ENODEV; goto out; break; case GELIC_WL_SCAN_STAT_GOT_LIST: /* ok, use current list */ break; } list_for_each_entry(scan_info, &wl->network_list, list) { if (wl->scan_age == 0 || time_after(scan_info->last_scanned + wl->scan_age, this_time)) ev = gelic_wl_translate_scan(netdev, info, ev, stop, scan_info); else pr_debug("%s:entry too old\n", __func__); if (stop - ev <= IW_EV_ADDR_LEN) { ret = -E2BIG; goto out; } } wrqu->data.length = ev - extra; wrqu->data.flags = 0; out: mutex_unlock(&wl->scan_lock); pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length); return ret; } #ifdef DEBUG static void scan_list_dump(struct gelic_wl_info *wl) { struct gelic_wl_scan_info *scan_info; int i; i = 
0; list_for_each_entry(scan_info, &wl->network_list, list) { pr_debug("%s: item %d\n", __func__, i++); pr_debug("valid=%d eurusindex=%d last=%lx\n", scan_info->valid, scan_info->eurus_index, scan_info->last_scanned); pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n", scan_info->rate_len, scan_info->rate_ext_len, scan_info->essid_len); /* -- */ pr_debug("bssid=%pM\n", &scan_info->hwinfo->bssid[2]); pr_debug("essid=%s\n", scan_info->hwinfo->essid); } } #endif static int gelic_wl_set_auth(struct net_device *netdev, struct iw_request_info *info, union iwreq_data *data, char *extra) { struct iw_param *param = &data->param; struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); unsigned long irqflag; int ret = 0; pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX); spin_lock_irqsave(&wl->lock, irqflag); switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { pr_debug("%s: NO WPA selected\n", __func__); wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; wl->group_cipher_method = GELIC_WL_CIPHER_WEP; wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; } if (param->value & IW_AUTH_WPA_VERSION_WPA) { pr_debug("%s: WPA version 1 selected\n", __func__); wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; wl->auth_method = GELIC_EURUS_AUTH_OPEN; } if (param->value & IW_AUTH_WPA_VERSION_WPA2) { /* * As the hypervisor may not tell the cipher * information of the AP if it is WPA2, * you will not decide suitable cipher from * its beacon. * You should have knowledge about the AP's * cipher information in other method prior to * the association. 
*/ if (!precise_ie()) pr_info("%s: WPA2 may not work\n", __func__); if (wpa2_capable()) { wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2; wl->group_cipher_method = GELIC_WL_CIPHER_AES; wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; wl->auth_method = GELIC_EURUS_AUTH_OPEN; } else ret = -EINVAL; } break; case IW_AUTH_CIPHER_PAIRWISE: if (param->value & (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { pr_debug("%s: WEP selected\n", __func__); wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; } if (param->value & IW_AUTH_CIPHER_TKIP) { pr_debug("%s: TKIP selected\n", __func__); wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; } if (param->value & IW_AUTH_CIPHER_CCMP) { pr_debug("%s: CCMP selected\n", __func__); wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; } if (param->value & IW_AUTH_CIPHER_NONE) { pr_debug("%s: no auth selected\n", __func__); wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; } break; case IW_AUTH_CIPHER_GROUP: if (param->value & (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { pr_debug("%s: WEP selected\n", __func__); wl->group_cipher_method = GELIC_WL_CIPHER_WEP; } if (param->value & IW_AUTH_CIPHER_TKIP) { pr_debug("%s: TKIP selected\n", __func__); wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; } if (param->value & IW_AUTH_CIPHER_CCMP) { pr_debug("%s: CCMP selected\n", __func__); wl->group_cipher_method = GELIC_WL_CIPHER_AES; } if (param->value & IW_AUTH_CIPHER_NONE) { pr_debug("%s: no auth selected\n", __func__); wl->group_cipher_method = GELIC_WL_CIPHER_NONE; } break; case IW_AUTH_80211_AUTH_ALG: if (param->value & IW_AUTH_ALG_SHARED_KEY) { pr_debug("%s: shared key specified\n", __func__); wl->auth_method = GELIC_EURUS_AUTH_SHARED; } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { pr_debug("%s: open system specified\n", __func__); wl->auth_method = GELIC_EURUS_AUTH_OPEN; } else ret = -EINVAL; break; case IW_AUTH_WPA_ENABLED: if (param->value) { pr_debug("%s: WPA enabled\n", __func__); wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; } else { 
		/* tail of gelic_wl_set_auth(); the function entry is above this chunk */
		pr_debug("%s: WPA disabled\n", __func__);
			wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
		}
		break;
	case IW_AUTH_KEY_MGMT:
		if (param->value & IW_AUTH_KEY_MGMT_PSK)
			break;
		/* intentionally fall through */
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (!ret)
		set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);

	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: -> %d\n", __func__, ret);
	return ret;
}

/*
 * SIOCGIWAUTH handler: report the current WPA version, 802.11 auth
 * algorithm or WPA-enabled state out of the driver's cached settings.
 * Reads wl state under wl->lock.
 * NOTE(review): this getter uses netdev_port() while the sibling
 * handlers below use netdev_priv() — confirm both resolve to the same
 * port object.
 */
static int gelic_wl_get_auth(struct net_device *netdev,
			     struct iw_request_info *info,
			     union iwreq_data *iwreq, char *extra)
{
	struct iw_param *param = &iwreq->param;
	struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
	unsigned long irqflag;
	int ret = 0;

	pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
	spin_lock_irqsave(&wl->lock, irqflag);
	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		switch (wl->wpa_level) {
		case GELIC_WL_WPA_LEVEL_WPA:
			param->value |= IW_AUTH_WPA_VERSION_WPA;
			break;
		case GELIC_WL_WPA_LEVEL_WPA2:
			param->value |= IW_AUTH_WPA_VERSION_WPA2;
			break;
		default:
			param->value |= IW_AUTH_WPA_VERSION_DISABLED;
		}
		break;
	case IW_AUTH_80211_AUTH_ALG:
		if (wl->auth_method == GELIC_EURUS_AUTH_SHARED)
			param->value = IW_AUTH_ALG_SHARED_KEY;
		else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN)
			param->value = IW_AUTH_ALG_OPEN_SYSTEM;
		break;
	case IW_AUTH_WPA_ENABLED:
		switch (wl->wpa_level) {
		case GELIC_WL_WPA_LEVEL_WPA:
		case GELIC_WL_WPA_LEVEL_WPA2:
			param->value = 1;
			break;
		default:
			param->value = 0;
			break;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: -> %d\n", __func__, ret);
	return ret;
}

/*
 * SIOC{S,G}IWESSID
 *
 * Setter caches the requested ESSID (flags != 0) or switches to
 * "any ESSID" (flags == 0), then kicks the association worker.
 */
static int gelic_wl_set_essid(struct net_device *netdev,
			      struct iw_request_info *info,
			      union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <- l=%d f=%d\n", __func__,
		 data->essid.length, data->essid.flags);
	if (IW_ESSID_MAX_SIZE < data->essid.length)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (data->essid.flags) {
		/* a specific ESSID was supplied in 'extra' */
		wl->essid_len = data->essid.length;
		memcpy(wl->essid, extra, wl->essid_len);
		pr_debug("%s: essid = '%s'\n", __func__, extra);
		set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
	} else {
		/* flags == 0 means "associate with any ESSID" */
		pr_debug("%s: ESSID any\n", __func__);
		clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
	}
	set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
	spin_unlock_irqrestore(&wl->lock, irqflag);

	gelic_wl_try_associate(netdev); /* FIXME */
	pr_debug("%s: ->\n", __func__);
	return 0;
}

/*
 * SIOCGIWESSID handler: returns the cached ESSID when one was set or
 * when currently associated; otherwise reports flags = 0 (any).
 * Takes assoc_stat_lock then wl->lock; note the existing unlock order
 * releases the mutex before the spinlock.
 */
static int gelic_wl_get_essid(struct net_device *netdev,
			      struct iw_request_info *info,
			      union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <-\n", __func__);
	mutex_lock(&wl->assoc_stat_lock);
	spin_lock_irqsave(&wl->lock, irqflag);
	if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
	    wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
		memcpy(extra, wl->essid, wl->essid_len);
		data->essid.length = wl->essid_len;
		data->essid.flags = 1;
	} else
		data->essid.flags = 0;

	mutex_unlock(&wl->assoc_stat_lock);
	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: -> len=%d\n", __func__, data->essid.length);

	return 0;
}

/*
 * SIOCSIWENCODE handler (legacy WEP configuration).
 * Key index 0 means "current key"; 1..GELIC_WEP_KEYS select a slot
 * (stored zero-based).  IW_ENCODE_NOKEY requests manipulate flags
 * only; otherwise the supplied key material is cached and WEP is
 * selected as both pairwise and group cipher.
 */
static int gelic_wl_set_encode(struct net_device *netdev,
			       struct iw_request_info *info,
			       union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	struct iw_point *enc = &data->encoding;
	__u16 flags;
	unsigned long irqflag;
	int key_index, index_specified;
	int ret = 0;

	pr_debug("%s: <-\n", __func__);
	flags = enc->flags & IW_ENCODE_FLAGS;
	key_index = enc->flags & IW_ENCODE_INDEX;

	pr_debug("%s: key_index = %d\n", __func__, key_index);
	pr_debug("%s: key_len = %d\n", __func__, enc->length);
	pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);

	if (GELIC_WEP_KEYS < key_index)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (key_index) {
		index_specified = 1;
		key_index--;	/* userland index is 1-based */
	} else {
		index_specified = 0;
		key_index = wl->current_key;
	}

	if (flags & IW_ENCODE_NOKEY) {
		/*
		 * if just IW_ENCODE_NOKEY, change current key index
		 * NOTE(review): '!flags' can never be true inside this
		 * branch since flags has IW_ENCODE_NOKEY set — the
		 * "switch current key" path looks unreachable; confirm
		 * against wireless-extensions semantics.
		 */
		if (!flags && index_specified) {
			wl->current_key = key_index;
			goto done;
		}

		if (flags & IW_ENCODE_DISABLED) {
			if (!index_specified) {
				/* disable encryption */
				wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
				wl->pairwise_cipher_method =
					GELIC_WL_CIPHER_NONE;
				/* invalidate all key */
				wl->key_enabled = 0;
			} else
				clear_bit(key_index, &wl->key_enabled);
		}

		if (flags & IW_ENCODE_OPEN)
			wl->auth_method = GELIC_EURUS_AUTH_OPEN;
		if (flags & IW_ENCODE_RESTRICTED) {
			pr_info("%s: shared key mode enabled\n", __func__);
			wl->auth_method = GELIC_EURUS_AUTH_SHARED;
		}
	} else {
		/* actual key material supplied in 'extra' */
		if (IW_ENCODING_TOKEN_MAX < enc->length) {
			ret = -EINVAL;
			goto done;
		}
		wl->key_len[key_index] = enc->length;
		memcpy(wl->key[key_index], extra, enc->length);
		set_bit(key_index, &wl->key_enabled);
		wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
		wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
	}
	set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
done:
	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/*
 * SIOCGIWENCODE handler: report the cached WEP key (if enabled and the
 * caller's buffer is large enough) plus open/shared/disabled flags.
 */
static int gelic_wl_get_encode(struct net_device *netdev,
			       struct iw_request_info *info,
			       union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	struct iw_point *enc = &data->encoding;
	unsigned long irqflag;
	unsigned int key_index, index_specified;
	int ret = 0;

	pr_debug("%s: <-\n", __func__);
	key_index = enc->flags & IW_ENCODE_INDEX;
	pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
		 enc->flags, enc->pointer, enc->length, extra);
	if (GELIC_WEP_KEYS < key_index)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (key_index) {
		index_specified = 1;
		key_index--;
	} else {
		index_specified = 0;
		key_index = wl->current_key;
	}

	if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
		switch (wl->auth_method) {
		case GELIC_EURUS_AUTH_OPEN:
			enc->flags = IW_ENCODE_OPEN;
			break;
		case GELIC_EURUS_AUTH_SHARED:
			enc->flags = IW_ENCODE_RESTRICTED;
			break;
		}
	} else
		enc->flags = IW_ENCODE_DISABLED;

	if (test_bit(key_index, &wl->key_enabled)) {
		if (enc->length < wl->key_len[key_index]) {
			/* caller's buffer too small for the key */
			ret = -EINVAL;
			goto done;
		}
		enc->length = wl->key_len[key_index];
		memcpy(extra, wl->key[key_index], wl->key_len[key_index]);
	} else {
		enc->length = 0;
		enc->flags |= IW_ENCODE_NOKEY;
	}
	enc->flags |= key_index + 1;	/* report 1-based index */
	pr_debug("%s: -> flag=%x len=%d\n", __func__,
		 enc->flags, enc->length);

done:
	spin_unlock_irqrestore(&wl->lock, irqflag);
	return ret;
}

/*
 * SIOC{S,G}IWAP
 *
 * Setter: a valid unicast MAC pins association to that BSSID; an
 * invalid/zero address clears the pin.
 */
static int gelic_wl_set_ap(struct net_device *netdev,
			   struct iw_request_info *info,
			   union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <-\n", __func__);
	if (data->ap_addr.sa_family != ARPHRD_ETHER)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (is_valid_ether_addr(data->ap_addr.sa_data)) {
		memcpy(wl->bssid, data->ap_addr.sa_data,
		       ETH_ALEN);
		set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
		set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
		pr_debug("%s: bss=%pM\n", __func__, wl->bssid);
	} else {
		pr_debug("%s: clear bssid\n", __func__);
		clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
		memset(wl->bssid, 0, ETH_ALEN);
	}
	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: ->\n", __func__);
	return 0;
}

/* SIOCGIWAP handler: report the BSSID we are associated with, or zeros */
static int gelic_wl_get_ap(struct net_device *netdev,
			   struct iw_request_info *info,
			   union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <-\n", __func__);
	mutex_lock(&wl->assoc_stat_lock);
	spin_lock_irqsave(&wl->lock, irqflag);
	if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
		data->ap_addr.sa_family = ARPHRD_ETHER;
		memcpy(data->ap_addr.sa_data, wl->active_bssid,
		       ETH_ALEN);
	} else
		memset(data->ap_addr.sa_data, 0, ETH_ALEN);

	spin_unlock_irqrestore(&wl->lock, irqflag);
	mutex_unlock(&wl->assoc_stat_lock);
	pr_debug("%s: ->\n", __func__);
	return 0;
}

/*
 * SIOCSIWENCODEEXT handler (WPA-capable key configuration).
 * Handles: default-TX-key switch (empty payload + SET_TX_KEY),
 * disabling crypto (ALG_NONE / IW_ENCODE_DISABLED), WEP key upload,
 * and PMK (pre-shared key) upload for WPA/WPA2.
 */
static int gelic_wl_set_encodeext(struct net_device *netdev,
				  struct iw_request_info *info,
				  union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	struct iw_point *enc = &data->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
	__u16 alg;
	__u16 flags;
	unsigned long irqflag;
	int key_index;
	int ret = 0;

	pr_debug("%s: <-\n", __func__);
	flags = enc->flags & IW_ENCODE_FLAGS;
	alg = ext->alg;
	key_index = enc->flags & IW_ENCODE_INDEX;

	pr_debug("%s: key_index = %d\n", __func__, key_index);
	pr_debug("%s: key_len = %d\n", __func__, enc->length);
	pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
	pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags);
	pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len);

	if (GELIC_WEP_KEYS < key_index)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (key_index)
		key_index--;
	else
		key_index = wl->current_key;

	if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
		/* reques to change default key index */
		pr_debug("%s: request to change default key to %d\n",
			 __func__, key_index);
		wl->current_key = key_index;
		goto done;
	}

	if (alg == IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) {
		pr_debug("%s: alg disabled\n", __func__);
		wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
		wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
		wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
		wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */
	} else if (alg == IW_ENCODE_ALG_WEP) {
		pr_debug("%s: WEP requested\n", __func__);
		if (flags & IW_ENCODE_OPEN) {
			pr_debug("%s: open key mode\n", __func__);
			wl->auth_method = GELIC_EURUS_AUTH_OPEN;
		}
		if (flags & IW_ENCODE_RESTRICTED) {
			pr_debug("%s: shared key mode\n", __func__);
			wl->auth_method = GELIC_EURUS_AUTH_SHARED;
		}
		if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
			pr_info("%s: key is too long %d\n", __func__,
				ext->key_len);
			ret = -EINVAL;
			goto done;
		}
		/* OK, update the key */
		wl->key_len[key_index] = ext->key_len;
		memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
		memcpy(wl->key[key_index], ext->key, ext->key_len);
		set_bit(key_index, &wl->key_enabled);
		/* remember wep info changed */
		set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
	} else if (alg == IW_ENCODE_ALG_PMK) {
		/* binary PSK from the supplicant; must be exact length */
		if (ext->key_len != WPA_PSK_LEN) {
			pr_err("%s: PSK length wrong %d\n", __func__,
			       ext->key_len);
			ret = -EINVAL;
			goto done;
		}
		memset(wl->psk, 0, sizeof(wl->psk));
		memcpy(wl->psk, ext->key, ext->key_len);
		wl->psk_len = ext->key_len;
		wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
		/* remember PSK configured */
		set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
	}
done:
	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/*
 * SIOCGIWENCODEEXT handler: report algorithm for the current group
 * cipher and, when enabled and the buffer fits, the cached key bytes.
 */
static int gelic_wl_get_encodeext(struct net_device *netdev,
				  struct iw_request_info *info,
				  union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	struct iw_point *enc = &data->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
	unsigned long irqflag;
	int key_index;
	int ret = 0;
	int max_key_len;

	pr_debug("%s: <-\n", __func__);

	max_key_len = enc->length - sizeof(struct iw_encode_ext);
	if (max_key_len < 0)
		return -EINVAL;
	key_index = enc->flags & IW_ENCODE_INDEX;

	pr_debug("%s: key_index = %d\n", __func__, key_index);
	pr_debug("%s: key_len = %d\n", __func__, enc->length);
	pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);

	if (GELIC_WEP_KEYS < key_index)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (key_index)
		key_index--;
	else
		key_index = wl->current_key;

	memset(ext, 0, sizeof(struct iw_encode_ext));
	switch (wl->group_cipher_method) {
	case GELIC_WL_CIPHER_WEP:
		ext->alg = IW_ENCODE_ALG_WEP;
		enc->flags |= IW_ENCODE_ENABLED;
		break;
	case GELIC_WL_CIPHER_TKIP:
		ext->alg = IW_ENCODE_ALG_TKIP;
		enc->flags |= IW_ENCODE_ENABLED;
		break;
	case GELIC_WL_CIPHER_AES:
		ext->alg = IW_ENCODE_ALG_CCMP;
		enc->flags |= IW_ENCODE_ENABLED;
		break;
	case GELIC_WL_CIPHER_NONE:
	default:
		ext->alg = IW_ENCODE_ALG_NONE;
		enc->flags |= IW_ENCODE_NOKEY;
		break;
	}

	if (!(enc->flags & IW_ENCODE_NOKEY)) {
		if (max_key_len < wl->key_len[key_index]) {
			ret = -E2BIG;
			goto out;
		}
		if (test_bit(key_index, &wl->key_enabled))
			memcpy(ext->key, wl->key[key_index],
			       wl->key_len[key_index]);
		else
			pr_debug("%s: disabled key requested ix=%d\n",
				 __func__, key_index);
	}
out:
	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/*
 * SIOC{S,G}IWMODE
 *
 * Only infrastructure (managed) mode is supported by this driver.
 */
static int gelic_wl_set_mode(struct net_device *netdev,
			     struct iw_request_info *info,
			     union iwreq_data *data, char *extra)
{
	__u32 mode = data->mode;
	int ret;

	pr_debug("%s: <-\n", __func__);
	if (mode == IW_MODE_INFRA)
		ret = 0;
	else
		ret = -EOPNOTSUPP;
	pr_debug("%s: -> %d\n", __func__, ret);
	return ret;
}

/* SIOCGIWMODE handler: always infrastructure mode */
static int gelic_wl_get_mode(struct net_device *netdev,
			     struct iw_request_info *info,
			     union iwreq_data *data, char *extra)
{
	__u32 *mode = &data->mode;
	pr_debug("%s: <-\n", __func__);
	*mode = IW_MODE_INFRA;
	pr_debug("%s: ->\n", __func__);
	return 0;
}

/* SIOCGIWNICKN */
static int gelic_wl_get_nick(struct net_device *net_dev,
			     struct iw_request_info *info,
			     union iwreq_data *data, char *extra)
{
	/* fixed nickname identifying this driver */
	strcpy(extra, "gelic_wl");
	data->data.length = strlen(extra);
	data->data.flags = 1;
	return 0;
}

/* --- */
/*
 * Wireless statistics callback: queries RSSI from the Eurus firmware
 * via a synchronous command into a scratch page.  On command failure
 * (e.g. not associated) all quality fields are marked invalid.
 * Returns a pointer to wl->iwstat, or NULL if the scratch page could
 * not be allocated.
 */
static struct iw_statistics *gelic_wl_get_wireless_stats(
					struct net_device *netdev)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	struct gelic_eurus_cmd *cmd;
	struct iw_statistics *is;
	struct gelic_eurus_rssi_info *rssi;
	void *buf;

	pr_debug("%s: <-\n", __func__);

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return NULL;

	is = &wl->iwstat;
	memset(is, 0, sizeof(*is));
	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
				   buf, sizeof(*rssi));
	if (cmd && !cmd->status && !cmd->cmd_status) {
		rssi = buf;
		is->qual.level = be16_to_cpu(rssi->rssi);
		is->qual.updated = IW_QUAL_LEVEL_UPDATED |
			IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
	} else
		/* not associated */
		is->qual.updated = IW_QUAL_ALL_INVALID;

	kfree(cmd);
	free_page((unsigned long)buf);
	pr_debug("%s: ->\n", __func__);
	return is;
}

/*
 * scanning helpers
 */
/*
 * Ask the firmware to start a scan.
 * @always_scan: force a fresh scan even if a bss list is cached
 * @essid/@essid_len: optional directed-scan ESSID (copied into a
 *                    zero-padded scratch page for the hypervisor)
 * A scan already in progress, or a usable cached list, short-circuits
 * without issuing a new command; wl->scan_done is completed when the
 * cached list is reused so waiters do not block.
 */
static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
			       u8 *essid, size_t essid_len)
{
	struct gelic_eurus_cmd *cmd;
	int ret = 0;
	void *buf = NULL;
	size_t len;

	pr_debug("%s: <- always=%d\n", __func__, always_scan);
	if (mutex_lock_interruptible(&wl->scan_lock))
		return -ERESTARTSYS;

	/*
	 * If already a scan in progress, do not trigger more
	 */
	if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) {
		pr_debug("%s: scanning now\n", __func__);
		goto out;
	}

	init_completion(&wl->scan_done);
	/*
	 * If we have already a bss list, don't try to get new
	 * unless we are doing an ESSID scan
	 */
	if ((!essid_len && !always_scan)
	    && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
		pr_debug("%s: already has the list\n", __func__);
		complete(&wl->scan_done);
		goto out;
	}

	/* ESSID scan ? */
	if (essid_len && essid) {
		buf = (void *)__get_free_page(GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}
		len = IW_ESSID_MAX_SIZE; /* hypervisor always requires 32 */
		memset(buf, 0, len);
		memcpy(buf, essid, essid_len);
		pr_debug("%s: essid scan='%s'\n", __func__, (char *)buf);
	} else
		len = 0;

	/*
	 * issue start scan request
	 */
	wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING;
	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN,
				   buf, len);
	if (!cmd || cmd->status || cmd->cmd_status) {
		/* roll back state and wake waiters on failure */
		wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
		complete(&wl->scan_done);
		ret = -ENOMEM;
		goto out;
	}
	kfree(cmd);
out:
	free_page((unsigned long)buf);
	mutex_unlock(&wl->scan_lock);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/*
 * retrieve scan result from the chip (hypervisor)
 * this function is invoked by schedule work.
*/ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) { struct gelic_eurus_cmd *cmd = NULL; struct gelic_wl_scan_info *target, *tmp; struct gelic_wl_scan_info *oldest = NULL; struct gelic_eurus_scan_info *scan_info; unsigned int scan_info_size; union iwreq_data data; unsigned long this_time = jiffies; unsigned int data_len, i, found, r; void *buf; pr_debug("%s:start\n", __func__); mutex_lock(&wl->scan_lock); buf = (void *)__get_free_page(GFP_KERNEL); if (!buf) { pr_info("%s: scan buffer alloc failed\n", __func__); goto out; } if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) { /* * stop() may be called while scanning, ignore result */ pr_debug("%s: scan complete when stat != scanning(%d)\n", __func__, wl->scan_stat); goto out; } cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN, buf, PAGE_SIZE); if (!cmd || cmd->status || cmd->cmd_status) { wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; pr_info("%s:cmd failed\n", __func__); kfree(cmd); goto out; } data_len = cmd->size; pr_debug("%s: data_len = %d\n", __func__, data_len); kfree(cmd); /* OK, bss list retrieved */ wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST; /* mark all entries are old */ list_for_each_entry_safe(target, tmp, &wl->network_list, list) { target->valid = 0; /* expire too old entries */ if (time_before(target->last_scanned + wl->scan_age, this_time)) { kfree(target->hwinfo); target->hwinfo = NULL; list_move_tail(&target->list, &wl->network_free_list); } } /* put them in the network_list */ for (i = 0, scan_info_size = 0, scan_info = buf; scan_info_size < data_len; i++, scan_info_size += be16_to_cpu(scan_info->size), scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) { pr_debug("%s:size=%d bssid=%pM scan_info=%p\n", __func__, be16_to_cpu(scan_info->size), &scan_info->bssid[2], scan_info); /* * The wireless firmware may return invalid channel 0 and/or * invalid rate if the AP emits zero length SSID ie. 
As this * scan information is useless, ignore it */ if (!be16_to_cpu(scan_info->channel) || !scan_info->rate[0]) { pr_debug("%s: invalid scan info\n", __func__); continue; } found = 0; oldest = NULL; list_for_each_entry(target, &wl->network_list, list) { if (ether_addr_equal(&target->hwinfo->bssid[2], &scan_info->bssid[2])) { found = 1; pr_debug("%s: same BBS found scanned list\n", __func__); break; } if (!oldest || (target->last_scanned < oldest->last_scanned)) oldest = target; } if (!found) { /* not found in the list */ if (list_empty(&wl->network_free_list)) { /* expire oldest */ target = oldest; } else { target = list_entry(wl->network_free_list.next, struct gelic_wl_scan_info, list); } } /* update the item */ target->last_scanned = this_time; target->valid = 1; target->eurus_index = i; kfree(target->hwinfo); target->hwinfo = kzalloc(be16_to_cpu(scan_info->size), GFP_KERNEL); if (!target->hwinfo) continue; /* copy hw scan info */ memcpy(target->hwinfo, scan_info, scan_info->size); target->essid_len = strnlen(scan_info->essid, sizeof(scan_info->essid)); target->rate_len = 0; for (r = 0; r < 12; r++) if (scan_info->rate[r]) target->rate_len++; if (8 < target->rate_len) pr_info("%s: AP returns %d rates\n", __func__, target->rate_len); target->rate_ext_len = 0; for (r = 0; r < 16; r++) if (scan_info->ext_rate[r]) target->rate_ext_len++; list_move_tail(&target->list, &wl->network_list); } memset(&data, 0, sizeof(data)); wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data, NULL); out: free_page((unsigned long)buf); complete(&wl->scan_done); mutex_unlock(&wl->scan_lock); pr_debug("%s:end\n", __func__); } /* * Select an appropriate bss from current scan list regarding * current settings from userspace. 
 * The caller must hold wl->scan_lock,
 * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST
 */
/*
 * Scoring helper for BSS selection: increments *weight for the
 * criterion just matched and promotes 'candid' to *best if its
 * accumulated weight now beats the best seen so far.
 */
static void update_best(struct gelic_wl_scan_info **best,
			struct gelic_wl_scan_info *candid,
			int *best_weight,
			int *weight)
{
	if (*best_weight < ++(*weight)) {
		*best_weight = *weight;
		*best = candid;
	}
}

/*
 * Pick the best BSS from the cached scan list.  A user-pinned BSSID
 * wins outright; otherwise candidates are scored on matching security
 * level (WPA2/WPA/WEP) and ESSID.  Returns NULL when nothing matches.
 */
static struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
{
	struct gelic_wl_scan_info *scan_info;
	struct gelic_wl_scan_info *best_bss;
	int weight, best_weight;
	u16 security;

	pr_debug("%s: <-\n", __func__);

	best_bss = NULL;
	best_weight = 0;

	list_for_each_entry(scan_info, &wl->network_list, list) {
		pr_debug("%s: station %p\n", __func__, scan_info);

		if (!scan_info->valid) {
			pr_debug("%s: station invalid\n", __func__);
			continue;
		}

		/* If bss specified, check it only */
		if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
			if (ether_addr_equal(&scan_info->hwinfo->bssid[2],
					     wl->bssid)) {
				best_bss = scan_info;
				pr_debug("%s: bssid matched\n", __func__);
				break;
			} else {
				pr_debug("%s: bssid unmached\n", __func__);
				continue;
			}
		}

		weight = 0;

		/* security */
		security = be16_to_cpu(scan_info->hwinfo->security) &
			GELIC_EURUS_SCAN_SEC_MASK;
		if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
			if (security == GELIC_EURUS_SCAN_SEC_WPA2)
				update_best(&best_bss, scan_info,
					    &best_weight, &weight);
			else
				continue;
		} else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) {
			if (security == GELIC_EURUS_SCAN_SEC_WPA)
				update_best(&best_bss, scan_info,
					    &best_weight, &weight);
			else
				continue;
		} else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE &&
			   wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
			if (security == GELIC_EURUS_SCAN_SEC_WEP)
				update_best(&best_bss, scan_info,
					    &best_weight, &weight);
			else
				continue;
		}

		/* If ESSID is set, check it */
		if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
			if ((scan_info->essid_len == wl->essid_len) &&
			    !strncmp(wl->essid, scan_info->hwinfo->essid,
				     scan_info->essid_len))
				update_best(&best_bss, scan_info,
					    &best_weight, &weight);
			else
				continue;
		}
	}

#ifdef DEBUG
	pr_debug("%s: -> bss=%p\n", __func__, best_bss);
	if (best_bss) {
		pr_debug("%s:addr=%pM\n", __func__,
			 &best_bss->hwinfo->bssid[2]);
	}
#endif
	return best_bss;
}

/*
 * Setup WEP configuration to the chip
 * The caller must hold wl->scan_lock,
 * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST
 */
/*
 * Push the cached WEP keys (or "no encryption") to the firmware via
 * GELIC_EURUS_CMD_SET_WEP_CFG.  Keys must be 5 bytes (40-bit) or
 * 13 bytes (104-bit); a single 13-byte key switches the whole config
 * to 104-bit mode.  Returns 0, -EINVAL on bad/missing keys, -ENOMEM
 * or -ENXIO on command failure.
 */
static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
{
	unsigned int i;
	struct gelic_eurus_wep_cfg *wep;
	struct gelic_eurus_cmd *cmd;
	int wep104 = 0;
	int have_key = 0;
	int ret = 0;

	pr_debug("%s: <-\n", __func__);
	/* we can assume no one should uses the buffer */
	wep = (struct gelic_eurus_wep_cfg *)__get_free_page(GFP_KERNEL);
	if (!wep)
		return -ENOMEM;

	memset(wep, 0, sizeof(*wep));

	if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
		pr_debug("%s: WEP mode\n", __func__);
		for (i = 0; i < GELIC_WEP_KEYS; i++) {
			if (!test_bit(i, &wl->key_enabled))
				continue;

			pr_debug("%s: key#%d enabled\n", __func__, i);
			have_key = 1;
			if (wl->key_len[i] == 13)
				wep104 = 1;
			else if (wl->key_len[i] != 5) {
				pr_info("%s: wrong wep key[%d]=%d\n",
					__func__, i, wl->key_len[i]);
				ret = -EINVAL;
				goto out;
			}
			memcpy(wep->key[i], wl->key[i], wl->key_len[i]);
		}

		if (!have_key) {
			pr_info("%s: all wep key disabled\n", __func__);
			ret = -EINVAL;
			goto out;
		}

		if (wep104) {
			pr_debug("%s: 104bit key\n", __func__);
			wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT);
		} else {
			pr_debug("%s: 40bit key\n", __func__);
			wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT);
		}
	} else {
		pr_debug("%s: NO encryption\n", __func__);
		wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE);
	}

	/* issue wep setup */
	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG,
				   wep, sizeof(*wep));
	if (!cmd)
		ret = -ENOMEM;
	else if (cmd->status || cmd->cmd_status)
		ret = -ENXIO;

	kfree(cmd);
out:
	free_page((unsigned long)wep);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

#ifdef DEBUG
/* debug-only: human-readable name for a Eurus WPA security constant */
static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
{
	switch (sec) {
	case GELIC_EURUS_WPA_SEC_NONE:
		return "NONE";
		break;
	case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
		return "WPA_TKIP_TKIP";
		break;
	case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
		return "WPA_TKIP_AES";
		break;
	case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
		return "WPA_AES_AES";
		break;
	case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
		return "WPA2_TKIP_TKIP";
		break;
	case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
		return "WPA2_TKIP_AES";
		break;
	case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
		return "WPA2_AES_AES";
		break;
	}
	return "";
};
#endif

/*
 * Push the WPA/WPA2 configuration (PSK + cipher pair) to the firmware
 * via GELIC_EURUS_CMD_SET_WPA_CFG.  The group cipher chooses the
 * security constant; a mixed TKIP-group/AES-pairwise setup is used
 * only when precise_ie() says the firmware advertises it correctly.
 * Returns 0, -ENOMEM, or -ENXIO on command failure.
 */
static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
{
	struct gelic_eurus_wpa_cfg *wpa;
	struct gelic_eurus_cmd *cmd;
	u16 security;
	int ret = 0;

	pr_debug("%s: <-\n", __func__);
	/* we can assume no one should uses the buffer */
	wpa = (struct gelic_eurus_wpa_cfg *)__get_free_page(GFP_KERNEL);
	if (!wpa)
		return -ENOMEM;

	memset(wpa, 0, sizeof(*wpa));

	if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat))
		pr_info("%s: PSK not configured yet\n", __func__);

	/* copy key */
	memcpy(wpa->psk, wl->psk, wl->psk_len);

	/* set security level */
	if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
		if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
			security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES;
		} else {
			if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
			    precise_ie())
				security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES;
			else
				security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP;
		}
	} else {
		if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
			security = GELIC_EURUS_WPA_SEC_WPA_AES_AES;
		} else {
			if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
			    precise_ie())
				security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES;
			else
				security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP;
		}
	}
	wpa->security = cpu_to_be16(security);

	/* PSK type */
	wpa->psk_type = cpu_to_be16(wl->psk_type);
#ifdef DEBUG
	pr_debug("%s: sec=%s psktype=%s\n", __func__,
		 wpasecstr(wpa->security),
		 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
		 "BIN" : "passphrase");
#if 0
	/*
	 * don't enable here if you plan to submit
	 * the debug log because this dumps your precious
	 * passphrase/key.
	 */
	pr_debug("%s: psk=%s\n", __func__,
		 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
		 "N/A" : wpa->psk);
#endif
#endif
	/* issue wpa setup */
	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG,
				   wpa, sizeof(*wpa));
	if (!cmd)
		ret = -ENOMEM;
	else if (cmd->status || cmd->cmd_status)
		ret = -ENXIO;
	kfree(cmd);
	free_page((unsigned long)wpa);
	pr_debug("%s: --> %d\n", __func__, ret);
	return ret;
}

/*
 * Start association. caller must hold assoc_stat_lock
 */
/*
 * Associate with 'bss': push common config (bss type, op mode, scan
 * index, auth method), then WEP or WPA config depending on wpa_level,
 * then issue the ASSOC command and wait (4 s timeout) for the
 * connected event to complete wl->assoc_done.  On any failure an
 * iwap event with a NULL BSSID notifies userspace of disconnection.
 */
static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
				  struct gelic_wl_scan_info *bss)
{
	struct gelic_eurus_cmd *cmd;
	struct gelic_eurus_common_cfg *common;
	int ret = 0;
	unsigned long rc;

	pr_debug("%s: <-\n", __func__);

	/* do common config */
	common = (struct gelic_eurus_common_cfg *)__get_free_page(GFP_KERNEL);
	if (!common)
		return -ENOMEM;

	memset(common, 0, sizeof(*common));
	common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
	common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);

	common->scan_index = cpu_to_be16(bss->eurus_index);
	switch (wl->auth_method) {
	case GELIC_EURUS_AUTH_OPEN:
		common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN);
		break;
	case GELIC_EURUS_AUTH_SHARED:
		common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED);
		break;
	}

#ifdef DEBUG
	scan_list_dump(wl);
#endif
	pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__,
		 be16_to_cpu(common->scan_index),
		 be16_to_cpu(common->bss_type),
		 be16_to_cpu(common->auth_method));

	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG,
				   common, sizeof(*common));
	if (!cmd || cmd->status || cmd->cmd_status) {
		ret = -ENOMEM;
		kfree(cmd);
		goto out;
	}
	kfree(cmd);

	/* WEP/WPA */
	switch (wl->wpa_level) {
	case GELIC_WL_WPA_LEVEL_NONE:
		/* If WEP or no security, setup WEP config */
		ret = gelic_wl_do_wep_setup(wl);
		break;
	case GELIC_WL_WPA_LEVEL_WPA:
	case GELIC_WL_WPA_LEVEL_WPA2:
		ret = gelic_wl_do_wpa_setup(wl);
		break;
	}

	if (ret) {
		pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
			 ret);
		ret = -EPERM;
		gelic_wl_send_iwap_event(wl, NULL);
		goto out;
	}

	/* start association */
	init_completion(&wl->assoc_done);
	wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING;
	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC,
				   NULL, 0);
	if (!cmd || cmd->status || cmd->cmd_status) {
		pr_debug("%s: assoc request failed\n", __func__);
		wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
		kfree(cmd);
		ret = -ENOMEM;
		gelic_wl_send_iwap_event(wl, NULL);
		goto out;
	}
	kfree(cmd);

	/* wait for connected event */
	rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/
	if (!rc) {
		/* timeouted. Maybe key or cyrpt mode is wrong */
		pr_info("%s: connect timeout\n", __func__);
		cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
					   NULL, 0);
		kfree(cmd);
		wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
		gelic_wl_send_iwap_event(wl, NULL);
		ret = -ENXIO;
	} else {
		wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED;
		/* copy bssid */
		memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN);

		/* send connect event */
		gelic_wl_send_iwap_event(wl, wl->active_bssid);
		pr_info("%s: connected\n", __func__);
	}
out:
	free_page((unsigned long)common);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/*
 * connected event
 */
/*
 * Handle a CONNECTED/WPA_CONNECTED firmware event: only the event
 * matching the configured wpa_level completes assoc_done and brings
 * the carrier up; any other event is just logged.
 */
static void gelic_wl_connected_event(struct gelic_wl_info *wl,
				     u64 event)
{
	u64 desired_event = 0;

	switch (wl->wpa_level) {
	case GELIC_WL_WPA_LEVEL_NONE:
		desired_event = GELIC_LV1_WL_EVENT_CONNECTED;
		break;
	case GELIC_WL_WPA_LEVEL_WPA:
	case GELIC_WL_WPA_LEVEL_WPA2:
		desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED;
		break;
	}

	if (desired_event == event) {
		pr_debug("%s: completed\n", __func__);
		complete(&wl->assoc_done);
		netif_carrier_on(port_to_netdev(wl_port(wl)));
	} else
		pr_debug("%s: event %#llx under wpa\n",
			 __func__, event);
}

/*
 * disconnect event
 */
static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
				      u64 event)
{
	struct gelic_eurus_cmd *cmd;
	int lock;

	/*
	 * If we fall here in the middle of association,
	 * associate_bss() should be waiting for complation of
	 * wl->assoc_done.
	 * As it waits with timeout, just leave assoc_done
	 * uncompleted, then it terminates with timeout
	 */
	if (!mutex_trylock(&wl->assoc_stat_lock)) {
		pr_debug("%s: already locked\n", __func__);
		lock = 0;
	} else {
		pr_debug("%s: obtain lock\n", __func__);
		lock = 1;
	}

	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
	kfree(cmd);

	/* send disconnected event to the supplicant */
	if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
		gelic_wl_send_iwap_event(wl, NULL);

	wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
	netif_carrier_off(port_to_netdev(wl_port(wl)));

	if (lock)
		mutex_unlock(&wl->assoc_stat_lock);
}

/*
 * event worker
 */
#ifdef DEBUG
/* debug-only: name of the first matching event bit (static buffer for
 * unknown values — not reentrant, debug use only) */
static const char *eventstr(enum gelic_lv1_wl_event event)
{
	static char buf[32];
	char *ret;
	if (event & GELIC_LV1_WL_EVENT_DEVICE_READY)
		ret = "EURUS_READY";
	else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED)
		ret = "SCAN_COMPLETED";
	else if (event & GELIC_LV1_WL_EVENT_DEAUTH)
		ret = "DEAUTH";
	else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST)
		ret = "BEACON_LOST";
	else if (event & GELIC_LV1_WL_EVENT_CONNECTED)
		ret = "CONNECTED";
	else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED)
		ret = "WPA_CONNECTED";
	else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR)
		ret = "WPA_ERROR";
	else {
		sprintf(buf, "Unknown(%#x)", event);
		ret = buf;
	}
	return ret;
}
#else
static const char *eventstr(enum gelic_lv1_wl_event event)
{
	return NULL;
}
#endif

/*
 * Delayed-work handler: drain all pending wireless events from the
 * hypervisor (lv1_net_control) and dispatch each to the scan-complete,
 * disconnect or connected handlers.  Loops until LV1 reports no more
 * entries.
 */
static void gelic_wl_event_worker(struct work_struct *work)
{
	struct gelic_wl_info *wl;
	struct gelic_port *port;
	u64 event, tmp;
	int status;

	pr_debug("%s:start\n", __func__);
	wl = container_of(work, struct gelic_wl_info, event_work.work);
	port = wl_port(wl);
	while (1) {
		status = lv1_net_control(bus_id(port->card), dev_id(port->card),
					 GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0,
					 &event, &tmp);
		if (status) {
			if (status != LV1_NO_ENTRY)
				pr_debug("%s:wlan event failed %d\n",
					 __func__, status);
			/* got all events */
			pr_debug("%s:end\n", __func__);
			return;
		}
		pr_debug("%s: event=%s\n", __func__, eventstr(event));
		switch (event) {
		case GELIC_LV1_WL_EVENT_SCAN_COMPLETED:
			gelic_wl_scan_complete_event(wl);
			break;
		case GELIC_LV1_WL_EVENT_BEACON_LOST:
		case GELIC_LV1_WL_EVENT_DEAUTH:
			gelic_wl_disconnect_event(wl, event);
			break;
		case GELIC_LV1_WL_EVENT_CONNECTED:
		case GELIC_LV1_WL_EVENT_WPA_CONNECTED:
			gelic_wl_connected_event(wl, event);
			break;
		default:
			break;
		}
	} /* while */
}

/*
 * association worker
 */
/*
 * Delayed-work handler driving association: snapshot the configured
 * ESSID under wl->lock, trigger/await a scan, pick the best BSS and
 * associate.  Re-queues itself after HZ/10 if the scan mutex wait was
 * interrupted; failures notify userspace via an iwap event.
 */
static void gelic_wl_assoc_worker(struct work_struct *work)
{
	struct gelic_wl_info *wl;

	struct gelic_wl_scan_info *best_bss;
	int ret;
	unsigned long irqflag;
	u8 *essid;
	size_t essid_len;

	wl = container_of(work, struct gelic_wl_info, assoc_work.work);

	mutex_lock(&wl->assoc_stat_lock);

	if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN)
		goto out;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
		pr_debug("%s: assoc ESSID configured %s\n", __func__,
			 wl->essid);
		essid = wl->essid;
		essid_len = wl->essid_len;
	} else {
		essid = NULL;
		essid_len = 0;
	}
	spin_unlock_irqrestore(&wl->lock, irqflag);

	ret = gelic_wl_start_scan(wl, 0, essid, essid_len);
	if (ret == -ERESTARTSYS) {
		pr_debug("%s: scan start failed association\n", __func__);
		schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/
		goto out;
	} else if (ret) {
		pr_info("%s: scan prerequisite failed\n", __func__);
		goto out;
	}

	/*
	 * Wait for bss scan completion
	 * If we have scan list already, gelic_wl_start_scan()
	 * returns OK and raises the complete. Thus,
	 * it's ok to wait unconditionally here
	 */
	wait_for_completion(&wl->scan_done);

	pr_debug("%s: scan done\n", __func__);
	mutex_lock(&wl->scan_lock);
	if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) {
		gelic_wl_send_iwap_event(wl, NULL);
		pr_info("%s: no scan list. association failed\n", __func__);
		goto scan_lock_out;
	}

	/* find best matching bss */
	best_bss = gelic_wl_find_best_bss(wl);
	if (!best_bss) {
		gelic_wl_send_iwap_event(wl, NULL);
		pr_info("%s: no bss matched. association failed\n", __func__);
		goto scan_lock_out;
	}

	/* ok, do association */
	ret = gelic_wl_associate_bss(wl, best_bss);
	if (ret)
		pr_info("%s: association failed %d\n", __func__, ret);
scan_lock_out:
	mutex_unlock(&wl->scan_lock);
out:
	mutex_unlock(&wl->assoc_stat_lock);
}

/*
 * Interrupt handler
 * Called from the ethernet interrupt handler
 * Processes wireless specific virtual interrupts only
 */
void gelic_wl_interrupt(struct net_device *netdev, u64 status)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));

	if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) {
		pr_debug("%s:cmd complete\n", __func__);
		complete(&wl->cmd_done_intr);
	}
	if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) {
		pr_debug("%s:event received\n", __func__);
		queue_delayed_work(wl->event_queue, &wl->event_work, 0);
	}
}

/*
 * driver helpers
 */
/* Wireless Extensions ioctl dispatch table */
static const iw_handler gelic_wl_wext_handler[] =
{
	IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
	IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
	IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
	IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
	IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
	IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
	IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
	IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
	IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
	IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
	IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
	IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
	IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
	IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
	IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
	IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
	IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
};

static const struct iw_handler_def gelic_wl_wext_handler_def = {
	.num_standard		= ARRAY_SIZE(gelic_wl_wext_handler),
	.standard		= gelic_wl_wext_handler,
	.get_wireless_stats	= gelic_wl_get_wireless_stats,
};

/*
 * Allocate and initialize the wireless net_device: scan-list storage,
 * command and event workqueues, list heads, work items, locks and
 * completions.  Error paths unwind in reverse order via gotos.
 * Returns the netdev, or NULL on allocation failure.
 */
static struct net_device *gelic_wl_alloc(struct gelic_card *card)
{
	struct net_device *netdev;
	struct gelic_port *port;
	struct gelic_wl_info *wl;
	unsigned int i;

	pr_debug("%s:start\n", __func__);
	netdev = alloc_etherdev(sizeof(struct gelic_port) +
				sizeof(struct gelic_wl_info));
	pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
	if (!netdev)
		return NULL;

	strcpy(netdev->name, "wlan%d");

	port = netdev_priv(netdev);
	port->netdev = netdev;
	port->card = card;
	port->type = GELIC_PORT_WIRELESS;

	wl = port_wl(port);
	pr_debug("%s: wl=%p port=%p\n", __func__, wl, port);

	/* allocate scan list */
	wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) *
			       GELIC_WL_BSS_MAX_ENT, GFP_KERNEL);

	if (!wl->networks)
		goto fail_bss;

	wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd");
	if (!wl->eurus_cmd_queue)
		goto fail_cmd_workqueue;

	wl->event_queue = create_singlethread_workqueue("gelic_event");
	if (!wl->event_queue)
		goto fail_event_workqueue;

	INIT_LIST_HEAD(&wl->network_free_list);
	INIT_LIST_HEAD(&wl->network_list);
	for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++)
		list_add_tail(&wl->networks[i].list,
			      &wl->network_free_list);
	init_completion(&wl->cmd_done_intr);

	INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker);
	INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker);
	mutex_init(&wl->scan_lock);
	mutex_init(&wl->assoc_stat_lock);

	init_completion(&wl->scan_done);
	/* for the case that no scan request is issued and stop() is called */
	complete(&wl->scan_done);

	spin_lock_init(&wl->lock);

	wl->scan_age = 5*HZ; /* FIXME */

	/* buffer for receiving scanned list etc */
	BUILD_BUG_ON(PAGE_SIZE <
		     sizeof(struct gelic_eurus_scan_info) *
		     GELIC_EURUS_MAX_SCAN);
	pr_debug("%s:end\n", __func__);
	return netdev;

fail_event_workqueue:
	destroy_workqueue(wl->eurus_cmd_queue);
fail_cmd_workqueue:
	kfree(wl->networks);
fail_bss:
	free_netdev(netdev);
	pr_debug("%s:end error\n", __func__);
	return NULL;

}

/*
 * Tear down everything gelic_wl_alloc() created: workqueues, each scan
 * entry's hwinfo, the scan array, and finally the netdev itself.
 */
static void gelic_wl_free(struct gelic_wl_info *wl)
{
	struct gelic_wl_scan_info *scan_info;
	unsigned int i;

	pr_debug("%s: <-\n", __func__);

	pr_debug("%s: destroy queues\n", __func__);
	destroy_workqueue(wl->eurus_cmd_queue);
	destroy_workqueue(wl->event_queue);

	scan_info = wl->networks;
	for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++)
		kfree(scan_info->hwinfo);
	kfree(wl->networks);

	free_netdev(port_to_netdev(wl_port(wl)));

	pr_debug("%s: ->\n", __func__);
}

/*
 * Schedule the association worker if the configuration is sufficient
 * for the active cipher (open: configured bit; WEP: at least one key;
 * WPA/WPA2: PSK set).  Returns the schedule_delayed_work() result, or
 * -1 when prerequisites are missing.
 */
static int gelic_wl_try_associate(struct net_device *netdev)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	int ret = -1;
	unsigned int i;

	pr_debug("%s: <-\n", __func__);

	/* check constraits for start association */
	/* for no access restriction AP */
	if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) {
		if (test_bit(GELIC_WL_STAT_CONFIGURED,
			     &wl->stat))
			goto do_associate;
		else {
			pr_debug("%s: no wep, not configured\n", __func__);
			return ret;
		}
	}

	/* for WEP, one of four keys should be set */
	if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
		/* one of keys set */
		for (i = 0; i < GELIC_WEP_KEYS; i++) {
			if (test_bit(i, &wl->key_enabled))
				goto do_associate;
		}
		pr_debug("%s: WEP, but no key specified\n", __func__);
		return ret;
	}

	/* for WPA[2], psk should be set */
	if ((wl->group_cipher_method == GELIC_WL_CIPHER_TKIP) ||
	    (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) {
		if (test_bit(GELIC_WL_STAT_WPA_PSK_SET,
			     &wl->stat))
			goto do_associate;
		else {
			pr_debug("%s: AES/TKIP, but PSK not configured\n",
				 __func__);
			return ret;
		}
	}

do_associate:
	ret = schedule_delayed_work(&wl->assoc_work, 0);
	pr_debug("%s: start association work %d\n", __func__, ret);
	return ret;
}

/*
 * netdev handlers
 */
/* ndo_open: bring the card up, kick association, start the tx queue */
static int gelic_wl_open(struct net_device *netdev)
{
	struct gelic_card *card = netdev_card(netdev);

	pr_debug("%s:->%p\n", __func__, netdev);

	gelic_card_up(card);

	/* try to associate */
	gelic_wl_try_associate(netdev);

	netif_start_queue(netdev);

	pr_debug("%s:<-\n", __func__);
	return 0;
}

/*
 * reset state machine
 */
/* Return all driver soft state (scan list, ciphers, keys, ESSID/BSSID,
 * association status, stats, status bits) to power-on defaults. */
static int gelic_wl_reset_state(struct gelic_wl_info *wl)
{
	struct gelic_wl_scan_info *target;
	struct gelic_wl_scan_info *tmp;

	/* empty scan list */
	list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
		list_move_tail(&target->list, &wl->network_free_list);
	}
	wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;

	/* clear configuration */
	wl->auth_method = GELIC_EURUS_AUTH_OPEN;
	wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
	wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
	wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;

	wl->key_enabled = 0;
	wl->current_key = 0;

	wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
	wl->psk_len = 0;

	wl->essid_len = 0;
	memset(wl->essid, 0, sizeof(wl->essid));
	memset(wl->bssid, 0, sizeof(wl->bssid));
	memset(wl->active_bssid, 0, sizeof(wl->active_bssid));

	wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;

	memset(&wl->iwstat, 0, sizeof(wl->iwstat));
	/* all status bit clear */
	wl->stat = 0;
	return 0;
}

/*
 * Tell eurus to terminate association
 */
static void gelic_wl_disconnect(struct net_device *netdev)
{
	struct gelic_port *port = netdev_priv(netdev);
	struct gelic_wl_info *wl = port_wl(port);
	struct gelic_eurus_cmd *cmd;

	/*
	 * If scann process is running on chip,
	 * further requests will be rejected
	 */
	if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING)
		wait_for_completion_timeout(&wl->scan_done, HZ);

	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
	kfree(cmd);
	gelic_wl_send_iwap_event(wl, NULL);
};

/* ndo_stop handler — continues past the end of this chunk */
static int gelic_wl_stop(struct net_device *netdev)
{
	struct gelic_port *port = netdev_priv(netdev);
	struct gelic_wl_info *wl = port_wl(port);
	struct gelic_card *card = netdev_card(netdev);

	pr_debug("%s:<-\n", __func__);

	/*
	 * Cancel pending association work.
* event work can run after netdev down */ cancel_delayed_work(&wl->assoc_work); if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) gelic_wl_disconnect(netdev); /* reset our state machine */ gelic_wl_reset_state(wl); netif_stop_queue(netdev); gelic_card_down(card); pr_debug("%s:->\n", __func__); return 0; } /* -- */ static const struct net_device_ops gelic_wl_netdevice_ops = { .ndo_open = gelic_wl_open, .ndo_stop = gelic_wl_stop, .ndo_start_xmit = gelic_net_xmit, .ndo_set_rx_mode = gelic_net_set_multi, .ndo_change_mtu = gelic_net_change_mtu, .ndo_tx_timeout = gelic_net_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gelic_net_poll_controller, #endif }; static const struct ethtool_ops gelic_wl_ethtool_ops = { .get_drvinfo = gelic_net_get_drvinfo, .get_link = gelic_wl_get_link, }; static void gelic_wl_setup_netdev_ops(struct net_device *netdev) { struct gelic_wl_info *wl; wl = port_wl(netdev_priv(netdev)); BUG_ON(!wl); netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; netdev->ethtool_ops = &gelic_wl_ethtool_ops; netdev->netdev_ops = &gelic_wl_netdevice_ops; netdev->wireless_data = &wl->wireless_data; netdev->wireless_handlers = &gelic_wl_wext_handler_def; } /* * driver probe/remove */ int gelic_wl_driver_probe(struct gelic_card *card) { int ret; struct net_device *netdev; pr_debug("%s:start\n", __func__); if (ps3_compare_firmware_version(1, 6, 0) < 0) return 0; if (!card->vlan[GELIC_PORT_WIRELESS].tx) return 0; /* alloc netdevice for wireless */ netdev = gelic_wl_alloc(card); if (!netdev) return -ENOMEM; /* setup net_device structure */ SET_NETDEV_DEV(netdev, &card->dev->core); gelic_wl_setup_netdev_ops(netdev); /* setup some of net_device and register it */ ret = gelic_net_setup_netdev(netdev, card); if (ret) goto fail_setup; card->netdev[GELIC_PORT_WIRELESS] = netdev; /* add enable wireless interrupt */ card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED | 
GELIC_CARD_WLAN_COMMAND_COMPLETED; /* to allow wireless commands while both interfaces are down */ gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED | GELIC_CARD_WLAN_COMMAND_COMPLETED); pr_debug("%s:end\n", __func__); return 0; fail_setup: gelic_wl_free(port_wl(netdev_port(netdev))); return ret; } int gelic_wl_driver_remove(struct gelic_card *card) { struct gelic_wl_info *wl; struct net_device *netdev; pr_debug("%s:start\n", __func__); if (ps3_compare_firmware_version(1, 6, 0) < 0) return 0; if (!card->vlan[GELIC_PORT_WIRELESS].tx) return 0; netdev = card->netdev[GELIC_PORT_WIRELESS]; wl = port_wl(netdev_priv(netdev)); /* if the interface was not up, but associated */ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) gelic_wl_disconnect(netdev); complete(&wl->cmd_done_intr); /* cancel all work queue */ cancel_delayed_work(&wl->assoc_work); cancel_delayed_work(&wl->event_work); flush_workqueue(wl->eurus_cmd_queue); flush_workqueue(wl->event_queue); unregister_netdev(netdev); /* disable wireless interrupt */ pr_debug("%s: disable intr\n", __func__); card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED | GELIC_CARD_WLAN_COMMAND_COMPLETED); /* free bss list, netdev*/ gelic_wl_free(wl); pr_debug("%s:end\n", __func__); return 0; }
gpl-2.0
pikachukaki/android_kernel_huawei_honor
drivers/media/rc/keymaps/rc-iodata-bctv7e.c
3094
2098
/* iodata-bctv7e.h - Keytable for iodata_bctv7e Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* Scancode -> keycode table for the IO-DATA BCTV7E remote. */
static struct rc_map_table iodata_bctv7e[] = {
	{ 0x40, KEY_TV },
	{ 0x20, KEY_RADIO },		/* FM */
	{ 0x60, KEY_EPG },
	{ 0x00, KEY_POWER },

	/* Keys 0 to 9 */
	{ 0x44, KEY_0 },		/* 10 */
	{ 0x50, KEY_1 },
	{ 0x30, KEY_2 },
	{ 0x70, KEY_3 },
	{ 0x48, KEY_4 },
	{ 0x28, KEY_5 },
	{ 0x68, KEY_6 },
	{ 0x58, KEY_7 },
	{ 0x38, KEY_8 },
	{ 0x78, KEY_9 },

	{ 0x10, KEY_L },		/* Live */
	{ 0x08, KEY_TIME },		/* Time Shift */

	{ 0x18, KEY_PLAYPAUSE },	/* Play */

	{ 0x24, KEY_ENTER },		/* 11 */
	{ 0x64, KEY_ESC },		/* 12 */
	{ 0x04, KEY_M },		/* Multi */

	{ 0x54, KEY_VIDEO },
	{ 0x34, KEY_CHANNELUP },
	{ 0x74, KEY_VOLUMEUP },
	{ 0x14, KEY_MUTE },

	{ 0x4c, KEY_VCR },		/* SVIDEO */
	{ 0x2c, KEY_CHANNELDOWN },
	{ 0x6c, KEY_VOLUMEDOWN },
	{ 0x0c, KEY_ZOOM },

	{ 0x5c, KEY_PAUSE },
	{ 0x3c, KEY_RED },		/* || (red) */
	{ 0x7c, KEY_RECORD },		/* recording */
	{ 0x1c, KEY_STOP },

	{ 0x41, KEY_REWIND },		/* backward << */
	{ 0x21, KEY_PLAY },
	{ 0x61, KEY_FASTFORWARD },	/* forward >> */
	{ 0x01, KEY_NEXT },		/* skip >| */
};

static struct rc_map_list iodata_bctv7e_map = {
	.map = {
		.scan    = iodata_bctv7e,
		.size    = ARRAY_SIZE(iodata_bctv7e),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_IODATA_BCTV7E,
	}
};

/* Register the table with the rc-core map registry on module load. */
static int __init init_rc_map_iodata_bctv7e(void)
{
	return rc_map_register(&iodata_bctv7e_map);
}

static void __exit exit_rc_map_iodata_bctv7e(void)
{
	rc_map_unregister(&iodata_bctv7e_map);
}

module_init(init_rc_map_iodata_bctv7e)
module_exit(exit_rc_map_iodata_bctv7e)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
allspark2020/hawx
sound/oss/sb_mixer.c
3606
19729
/*
 * sound/oss/sb_mixer.c
 *
 * The low level mixer driver for the Sound Blaster compatible cards.
 */
/*
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 *
 *
 * Thomas Sailer   : ioctl code reworked (vmalloc/vfree removed)
 * Rolf Fokkens (Dec 20 1998) : Moved ESS stuff into sb_ess.[ch]
 * Stanislav Voronyi <stas@esc.kharkov.com> : Support for AWE 3DSE device (Jun 7 1999)
 */

#include <linux/slab.h>

#include "sound_config.h"

#define __SB_MIXER_C__

#include "sb.h"
#include "sb_mixer.h"
#include "sb_ess.h"

#define SBPRO_RECORDING_DEVICES	(SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD)

/* Same as SB Pro, unless I find otherwise */
#define SGNXPRO_RECORDING_DEVICES	SBPRO_RECORDING_DEVICES

#define SBPRO_MIXER_DEVICES	(SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | \
				 SOUND_MASK_CD | SOUND_MASK_VOLUME)

/*
 * SG NX Pro has treble and bass settings on the mixer. The 'speaker'
 * channel is the COVOX/DisneySoundSource emulation volume control
 * on the mixer. It does NOT control speaker volume. Should have own
 * mask eventually?
 */
#define SGNXPRO_MIXER_DEVICES	(SBPRO_MIXER_DEVICES|SOUND_MASK_BASS| \
				 SOUND_MASK_TREBLE|SOUND_MASK_SPEAKER )

#define SB16_RECORDING_DEVICES	(SOUND_MASK_SYNTH | SOUND_MASK_LINE | SOUND_MASK_MIC | \
				 SOUND_MASK_CD)

#define SB16_OUTFILTER_DEVICES	(SOUND_MASK_LINE | SOUND_MASK_MIC | \
				 SOUND_MASK_CD)

#define SB16_MIXER_DEVICES	(SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
				 SOUND_MASK_CD | \
				 SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | \
				 SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | \
				 SOUND_MASK_IMIX)

/* These are the only devices that are working at the moment.  Others could
 * be added once they are identified and a method is found to control them.
 */
#define ALS007_MIXER_DEVICES	(SOUND_MASK_SYNTH | SOUND_MASK_LINE | \
				 SOUND_MASK_PCM | SOUND_MASK_MIC | \
				 SOUND_MASK_CD | \
				 SOUND_MASK_VOLUME)

/*
 * Per-model register maps: each MIX_ENT gives, per channel, the mixer
 * register, the MSB bit offset and the field width (0/0/0 = absent).
 */
static mixer_tab sbpro_mix = {
	MIX_ENT(SOUND_MIXER_VOLUME,	0x22, 7, 4, 0x22, 3, 4),
	MIX_ENT(SOUND_MIXER_BASS,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_TREBLE,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_SYNTH,	0x26, 7, 4, 0x26, 3, 4),
	MIX_ENT(SOUND_MIXER_PCM,	0x04, 7, 4, 0x04, 3, 4),
	MIX_ENT(SOUND_MIXER_SPEAKER,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_LINE,	0x2e, 7, 4, 0x2e, 3, 4),
	MIX_ENT(SOUND_MIXER_MIC,	0x0a, 2, 3, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_CD,		0x28, 7, 4, 0x28, 3, 4),
	MIX_ENT(SOUND_MIXER_IMIX,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_ALTPCM,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_RECLEV,	0x00, 0, 0, 0x00, 0, 0)
};

static mixer_tab sb16_mix = {
	MIX_ENT(SOUND_MIXER_VOLUME,	0x30, 7, 5, 0x31, 7, 5),
	MIX_ENT(SOUND_MIXER_BASS,	0x46, 7, 4, 0x47, 7, 4),
	MIX_ENT(SOUND_MIXER_TREBLE,	0x44, 7, 4, 0x45, 7, 4),
	MIX_ENT(SOUND_MIXER_SYNTH,	0x34, 7, 5, 0x35, 7, 5),
	MIX_ENT(SOUND_MIXER_PCM,	0x32, 7, 5, 0x33, 7, 5),
	MIX_ENT(SOUND_MIXER_SPEAKER,	0x3b, 7, 2, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_LINE,	0x38, 7, 5, 0x39, 7, 5),
	MIX_ENT(SOUND_MIXER_MIC,	0x3a, 7, 5, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_CD,		0x36, 7, 5, 0x37, 7, 5),
	MIX_ENT(SOUND_MIXER_IMIX,	0x3c, 0, 1, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_ALTPCM,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_RECLEV,	0x3f, 7, 2, 0x40, 7, 2), /* Obsolete. Use IGAIN */
	MIX_ENT(SOUND_MIXER_IGAIN,	0x3f, 7, 2, 0x40, 7, 2),
	MIX_ENT(SOUND_MIXER_OGAIN,	0x41, 7, 2, 0x42, 7, 2)
};

static mixer_tab als007_mix = {
	MIX_ENT(SOUND_MIXER_VOLUME,	0x62, 7, 4, 0x62, 3, 4),
	MIX_ENT(SOUND_MIXER_BASS,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_TREBLE,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_SYNTH,	0x66, 7, 4, 0x66, 3, 4),
	MIX_ENT(SOUND_MIXER_PCM,	0x64, 7, 4, 0x64, 3, 4),
	MIX_ENT(SOUND_MIXER_SPEAKER,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_LINE,	0x6e, 7, 4, 0x6e, 3, 4),
	MIX_ENT(SOUND_MIXER_MIC,	0x6a, 2, 3, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_CD,		0x68, 7, 4, 0x68, 3, 4),
	MIX_ENT(SOUND_MIXER_IMIX,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_ALTPCM,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_RECLEV,	0x00, 0, 0, 0x00, 0, 0), /* Obsolete. Use IGAIN */
	MIX_ENT(SOUND_MIXER_IGAIN,	0x00, 0, 0, 0x00, 0, 0),
	MIX_ENT(SOUND_MIXER_OGAIN,	0x00, 0, 0, 0x00, 0, 0)
};

/* SM_GAMES
   Master volume is lower and PCM & FM volumes higher than with SB Pro.
   This improves the sound quality */

static int smg_default_levels[32] = {
	0x2020,		/* Master Volume */
	0x4b4b,		/* Bass */
	0x4b4b,		/* Treble */
	0x6464,		/* FM */
	0x6464,		/* PCM */
	0x4b4b,		/* PC Speaker */
	0x4b4b,		/* Ext Line */
	0x0000,		/* Mic */
	0x4b4b,		/* CD */
	0x4b4b,		/* Recording monitor */
	0x4b4b,		/* SB PCM */
	0x4b4b,		/* Recording level */
	0x4b4b,		/* Input gain */
	0x4b4b,		/* Output gain */
	0x4040,		/* Line1 */
	0x4040,		/* Line2 */
	0x1515		/* Line3 */
};

static int sb_default_levels[32] = {
	0x5a5a,		/* Master Volume */
	0x4b4b,		/* Bass */
	0x4b4b,		/* Treble */
	0x4b4b,		/* FM */
	0x4b4b,		/* PCM */
	0x4b4b,		/* PC Speaker */
	0x4b4b,		/* Ext Line */
	0x1010,		/* Mic */
	0x4b4b,		/* CD */
	0x0000,		/* Recording monitor */
	0x4b4b,		/* SB PCM */
	0x4b4b,		/* Recording level */
	0x4b4b,		/* Input gain */
	0x4b4b,		/* Output gain */
	0x4040,		/* Line1 */
	0x4040,		/* Line2 */
	0x1515		/* Line3 */
};

/* Per-device bits in the SB16 input-switch registers (left/right). */
static unsigned char sb16_recmasks_L[SOUND_MIXER_NRDEVICES] = {
	0x00,	/* SOUND_MIXER_VOLUME */
	0x00,	/* SOUND_MIXER_BASS */
	0x00,	/* SOUND_MIXER_TREBLE */
	0x40,	/* SOUND_MIXER_SYNTH */
	0x00,	/* SOUND_MIXER_PCM */
	0x00,	/* SOUND_MIXER_SPEAKER */
	0x10,	/* SOUND_MIXER_LINE */
	0x01,	/* SOUND_MIXER_MIC */
	0x04,	/* SOUND_MIXER_CD */
	0x00,	/* SOUND_MIXER_IMIX */
	0x00,	/* SOUND_MIXER_ALTPCM */
	0x00,	/* SOUND_MIXER_RECLEV */
	0x00,	/* SOUND_MIXER_IGAIN */
	0x00	/* SOUND_MIXER_OGAIN */
};

static unsigned char sb16_recmasks_R[SOUND_MIXER_NRDEVICES] = {
	0x00,	/* SOUND_MIXER_VOLUME */
	0x00,	/* SOUND_MIXER_BASS */
	0x00,	/* SOUND_MIXER_TREBLE */
	0x20,	/* SOUND_MIXER_SYNTH */
	0x00,	/* SOUND_MIXER_PCM */
	0x00,	/* SOUND_MIXER_SPEAKER */
	0x08,	/* SOUND_MIXER_LINE */
	0x01,	/* SOUND_MIXER_MIC */
	0x02,	/* SOUND_MIXER_CD */
	0x00,	/* SOUND_MIXER_IMIX */
	0x00,	/* SOUND_MIXER_ALTPCM */
	0x00,	/* SOUND_MIXER_RECLEV */
	0x00,	/* SOUND_MIXER_IGAIN */
	0x00	/* SOUND_MIXER_OGAIN */
};

static char smw_mix_regs[] =	/* Left mixer registers */
{
	0x0b,	/* SOUND_MIXER_VOLUME */
	0x0d,	/* SOUND_MIXER_BASS */
	0x0d,	/* SOUND_MIXER_TREBLE */
	0x05,	/* SOUND_MIXER_SYNTH */
	0x09,	/* SOUND_MIXER_PCM */
	0x00,	/* SOUND_MIXER_SPEAKER */
	0x03,	/* SOUND_MIXER_LINE */
	0x01,	/* SOUND_MIXER_MIC */
	0x07,	/* SOUND_MIXER_CD */
	0x00,	/* SOUND_MIXER_IMIX */
	0x00,	/* SOUND_MIXER_ALTPCM */
	0x00,	/* SOUND_MIXER_RECLEV */
	0x00,	/* SOUND_MIXER_IGAIN */
	0x00,	/* SOUND_MIXER_OGAIN */
	0x00,	/* SOUND_MIXER_LINE1 */
	0x00,	/* SOUND_MIXER_LINE2 */
	0x00	/* SOUND_MIXER_LINE3 */
};

static int sbmixnum = 1;

static void sb_mixer_reset(sb_devc * devc);

void sb_mixer_set_stereo(sb_devc * devc, int mode)
{
	sb_chgmixer(devc, OUT_FILTER, STEREO_DAC, (mode ?
STEREO_DAC : MONO_DAC));
}

static int detect_mixer(sb_devc * devc)
{
	/* Just trust the mixer is there */
	return 1;
}

/*
 * Scale a 0..100 volume into the register field for (dev, chn) and
 * merge it into *regval, leaving the other bits untouched.
 */
static void change_bits(sb_devc * devc, unsigned char *regval, int dev, int chn, int newval)
{
	unsigned char mask;
	int shift;

	mask = (1 << (*devc->iomap)[dev][chn].nbits) - 1;
	newval = (int) ((newval * mask) + 50) / 100;	/* Scale */

	shift = (*devc->iomap)[dev][chn].bitoffs - (*devc->iomap)[dev][LEFT_CHN].nbits + 1;

	*regval &= ~(mask << shift);	/* Mask out previous value */
	*regval |= (newval & mask) << shift;	/* Set the new value */
}

static int sb_mixer_get(sb_devc * devc, int dev)
{
	if (!((1 << dev) & devc->supported_devices))
		return -EINVAL;
	return devc->levels[dev];
}

void smw_mixer_init(sb_devc * devc)
{
	int i;

	sb_setmixer(devc, 0x00, 0x18);	/* Mute unused (Telephone) line */
	sb_setmixer(devc, 0x10, 0x38);	/* Config register 2 */

	devc->supported_devices = 0;
	for (i = 0; i < sizeof(smw_mix_regs); i++)
		if (smw_mix_regs[i] != 0)
			devc->supported_devices |= (1 << i);

	devc->supported_rec_devices = devc->supported_devices &
		~(SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_PCM | SOUND_MASK_VOLUME);
	sb_mixer_reset(devc);
}

/*
 * Write a left/right level pair through the iomap table.  Returns the
 * packed level actually programmed, or -EINVAL for unmapped devices.
 */
int sb_common_mixer_set(sb_devc * devc, int dev, int left, int right)
{
	int regoffs;
	unsigned char val;

	if ((dev < 0) || (dev >= devc->iomap_sz))
		return -EINVAL;

	regoffs = (*devc->iomap)[dev][LEFT_CHN].regno;

	if (regoffs == 0)
		return -EINVAL;

	val = sb_getmixer(devc, regoffs);
	change_bits(devc, &val, dev, LEFT_CHN, left);

	if ((*devc->iomap)[dev][RIGHT_CHN].regno != regoffs)	/*
								 * Change register
								 */
	{
		sb_setmixer(devc, regoffs, val);	/*
							 * Save the old one
							 */
		regoffs = (*devc->iomap)[dev][RIGHT_CHN].regno;

		if (regoffs == 0)
			return left | (left << 8);	/*
							 * Just left channel present
							 */

		val = sb_getmixer(devc, regoffs);	/*
							 * Read the new one
							 */
	}
	change_bits(devc, &val, dev, RIGHT_CHN, right);

	sb_setmixer(devc, regoffs, val);

	return left | (right << 8);
}

static int smw_mixer_set(sb_devc * devc, int dev, int left, int right)
{
	int reg, val;

	switch (dev) {
	case SOUND_MIXER_VOLUME:
		sb_setmixer(devc, 0x0b, 96 - (96 * left / 100));	/* 96=mute, 0=max */
		sb_setmixer(devc, 0x0c, 96 - (96 * right / 100));
		break;

	case SOUND_MIXER_BASS:
	case SOUND_MIXER_TREBLE:
		devc->levels[dev] = left | (right << 8);

		/* Set left bass and treble values */
		val = ((devc->levels[SOUND_MIXER_TREBLE] & 0xff) * 16 / (unsigned) 100) << 4;
		val |= ((devc->levels[SOUND_MIXER_BASS] & 0xff) * 16 / (unsigned) 100) & 0x0f;
		sb_setmixer(devc, 0x0d, val);

		/* Set right bass and treble values */
		val = (((devc->levels[SOUND_MIXER_TREBLE] >> 8) & 0xff) * 16 / (unsigned) 100) << 4;
		val |= (((devc->levels[SOUND_MIXER_BASS] >> 8) & 0xff) * 16 / (unsigned) 100) & 0x0f;
		sb_setmixer(devc, 0x0e, val);

		break;

	default:
		/* bounds check */
		if (dev < 0 || dev >= ARRAY_SIZE(smw_mix_regs))
			return -EINVAL;
		reg = smw_mix_regs[dev];
		if (reg == 0)
			return -EINVAL;
		sb_setmixer(devc, reg, (24 - (24 * left / 100)) | 0x20);	/* 24=mute, 0=max */
		sb_setmixer(devc, reg + 1, (24 - (24 * right / 100)) | 0x40);
	}

	devc->levels[dev] = left | (right << 8);
	return left | (right << 8);
}

static int sb_mixer_set(sb_devc * devc, int dev, int value)
{
	int left = value & 0x000000ff;
	int right = (value & 0x0000ff00) >> 8;
	int retval;

	if (left > 100)
		left = 100;
	if (right > 100)
		right = 100;

	if ((dev < 0) || (dev > 31))
		return -EINVAL;

	if (!(devc->supported_devices & (1 << dev)))	/*
							 * Not supported
							 */
		return -EINVAL;

	/* Differentiate depending on the chipsets */
	switch (devc->model) {
	case MDL_SMW:
		retval = smw_mixer_set(devc, dev, left, right);
		break;
	case MDL_ESS:
		retval = ess_mixer_set(devc, dev, left, right);
		break;
	default:
		retval = sb_common_mixer_set(devc, dev, left, right);
	}
	if (retval >= 0)
		devc->levels[dev] = retval;

	return retval;
}

/*
 * set_recsrc doesn't apply to ES188x
 */
static void set_recsrc(sb_devc * devc, int src)
{
	sb_setmixer(devc, RECORD_SRC, (sb_getmixer(devc, RECORD_SRC) & ~7) | (src & 0x7));
}

static int set_recmask(sb_devc * devc, int mask)
{
	int devmask, i;
	unsigned char regimageL, regimageR;

	devmask = mask & devc->supported_rec_devices;

	switch (devc->model) {
	case MDL_SBPRO:
	case MDL_ESS:
	case MDL_JAZZ:
	case MDL_SMW:
		if (devc->model == MDL_ESS &&
		    ess_set_recmask(devc, &devmask))
			break;

		if (devmask != SOUND_MASK_MIC &&
		    devmask != SOUND_MASK_LINE &&
		    devmask != SOUND_MASK_CD) {
			/*
			 * More than one device selected. Drop the
			 * previous selection
			 */
			devmask &= ~devc->recmask;
		}
		if (devmask != SOUND_MASK_MIC &&
		    devmask != SOUND_MASK_LINE &&
		    devmask != SOUND_MASK_CD) {
			/*
			 * More than one device selected. Default to
			 * mic
			 */
			devmask = SOUND_MASK_MIC;
		}
		if (devmask ^ devc->recmask) {	/*
						 * Input source changed
						 */
			switch (devmask) {
			case SOUND_MASK_MIC:
				set_recsrc(devc, SRC__MIC);
				break;
			case SOUND_MASK_LINE:
				set_recsrc(devc, SRC__LINE);
				break;
			case SOUND_MASK_CD:
				set_recsrc(devc, SRC__CD);
				break;
			default:
				set_recsrc(devc, SRC__MIC);
			}
		}
		break;

	case MDL_SB16:
		if (!devmask)
			devmask = SOUND_MASK_MIC;

		if (devc->submodel == SUBMDL_ALS007) {
			switch (devmask) {
			case SOUND_MASK_LINE:
				sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_LINE);
				break;
			case SOUND_MASK_CD:
				sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_CD);
				break;
			case SOUND_MASK_SYNTH:
				sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_SYNTH);
				break;
			default:	/* Also takes care of SOUND_MASK_MIC case */
				sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_MIC);
				break;
			}
		} else {
			regimageL = regimageR = 0;
			for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
				if ((1 << i) & devmask) {
					regimageL |= sb16_recmasks_L[i];
					regimageR |= sb16_recmasks_R[i];
				}
			}
			/*
			 * Program the input-switch registers once with
			 * the fully accumulated masks.  The previous
			 * code issued both sb_setmixer() writes inside
			 * the loop, rewriting the hardware registers on
			 * every iteration; the final state is the same,
			 * without the redundant bus traffic.
			 */
			sb_setmixer(devc, SB16_IMASK_L, regimageL);
			sb_setmixer(devc, SB16_IMASK_R, regimageR);
		}
		break;
	}
	devc->recmask = devmask;
	return devc->recmask;
}

static int set_outmask(sb_devc * devc, int mask)
{
	int devmask, i;
	unsigned char regimage;

	devmask = mask & devc->supported_out_devices;

	switch (devc->model) {
	case MDL_SB16:
		if (devc->submodel == SUBMDL_ALS007)
			break;
		else {
			regimage = 0;
			for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
				if ((1 << i) &
devmask) { regimage |= (sb16_recmasks_L[i] | sb16_recmasks_R[i]); } sb_setmixer (devc, SB16_OMASK, regimage); } } break; default: break; } devc->outmask = devmask; return devc->outmask; } static int sb_mixer_ioctl(int dev, unsigned int cmd, void __user *arg) { sb_devc *devc = mixer_devs[dev]->devc; int val, ret; int __user *p = arg; /* * Use ioctl(fd, SOUND_MIXER_AGC, &mode) to turn AGC off (0) or on (1). * Use ioctl(fd, SOUND_MIXER_3DSE, &mode) to turn 3DSE off (0) or on (1) * or mode==2 put 3DSE state to mode. */ if (devc->model == MDL_SB16) { if (cmd == SOUND_MIXER_AGC) { if (get_user(val, p)) return -EFAULT; sb_setmixer(devc, 0x43, (~val) & 0x01); return 0; } if (cmd == SOUND_MIXER_3DSE) { /* I put here 15, but I don't know the exact version. At least my 4.13 havn't 3DSE, 4.16 has it. */ if (devc->minor < 15) return -EINVAL; if (get_user(val, p)) return -EFAULT; if (val == 0 || val == 1) sb_chgmixer(devc, AWE_3DSE, 0x01, val); else if (val == 2) { ret = sb_getmixer(devc, AWE_3DSE)&0x01; return put_user(ret, p); } else return -EINVAL; return 0; } } if (((cmd >> 8) & 0xff) == 'M') { if (_SIOC_DIR(cmd) & _SIOC_WRITE) { if (get_user(val, p)) return -EFAULT; switch (cmd & 0xff) { case SOUND_MIXER_RECSRC: ret = set_recmask(devc, val); break; case SOUND_MIXER_OUTSRC: ret = set_outmask(devc, val); break; default: ret = sb_mixer_set(devc, cmd & 0xff, val); } } else switch (cmd & 0xff) { case SOUND_MIXER_RECSRC: ret = devc->recmask; break; case SOUND_MIXER_OUTSRC: ret = devc->outmask; break; case SOUND_MIXER_DEVMASK: ret = devc->supported_devices; break; case SOUND_MIXER_STEREODEVS: ret = devc->supported_devices; /* The ESS seems to have stereo mic controls */ if (devc->model == MDL_ESS) ret &= ~(SOUND_MASK_SPEAKER|SOUND_MASK_IMIX); else if (devc->model != MDL_JAZZ && devc->model != MDL_SMW) ret &= ~(SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IMIX); break; case SOUND_MIXER_RECMASK: ret = devc->supported_rec_devices; break; case SOUND_MIXER_OUTMASK: ret = 
devc->supported_out_devices; break; case SOUND_MIXER_CAPS: ret = devc->mixer_caps; break; default: ret = sb_mixer_get(devc, cmd & 0xff); break; } return put_user(ret, p); } else return -EINVAL; } static struct mixer_operations sb_mixer_operations = { .owner = THIS_MODULE, .id = "SB", .name = "Sound Blaster", .ioctl = sb_mixer_ioctl }; static struct mixer_operations als007_mixer_operations = { .owner = THIS_MODULE, .id = "ALS007", .name = "Avance ALS-007", .ioctl = sb_mixer_ioctl }; static void sb_mixer_reset(sb_devc * devc) { char name[32]; int i; sprintf(name, "SB_%d", devc->sbmixnum); if (devc->sbmo.sm_games) devc->levels = load_mixer_volumes(name, smg_default_levels, 1); else devc->levels = load_mixer_volumes(name, sb_default_levels, 1); for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) sb_mixer_set(devc, i, devc->levels[i]); if (devc->model != MDL_ESS || !ess_mixer_reset (devc)) { set_recmask(devc, SOUND_MASK_MIC); }; } int sb_mixer_init(sb_devc * devc, struct module *owner) { int mixer_type = 0; int m; devc->sbmixnum = sbmixnum++; devc->levels = NULL; sb_setmixer(devc, 0x00, 0); /* Reset mixer */ if (!(mixer_type = detect_mixer(devc))) return 0; /* No mixer. Why? 
*/ switch (devc->model) { case MDL_ESSPCI: case MDL_YMPCI: case MDL_SBPRO: case MDL_AZTECH: case MDL_JAZZ: devc->mixer_caps = SOUND_CAP_EXCL_INPUT; devc->supported_devices = SBPRO_MIXER_DEVICES; devc->supported_rec_devices = SBPRO_RECORDING_DEVICES; devc->iomap = &sbpro_mix; devc->iomap_sz = ARRAY_SIZE(sbpro_mix); break; case MDL_ESS: ess_mixer_init (devc); break; case MDL_SMW: devc->mixer_caps = SOUND_CAP_EXCL_INPUT; devc->supported_devices = 0; devc->supported_rec_devices = 0; devc->iomap = &sbpro_mix; devc->iomap_sz = ARRAY_SIZE(sbpro_mix); smw_mixer_init(devc); break; case MDL_SB16: devc->mixer_caps = 0; devc->supported_rec_devices = SB16_RECORDING_DEVICES; devc->supported_out_devices = SB16_OUTFILTER_DEVICES; if (devc->submodel != SUBMDL_ALS007) { devc->supported_devices = SB16_MIXER_DEVICES; devc->iomap = &sb16_mix; devc->iomap_sz = ARRAY_SIZE(sb16_mix); } else { devc->supported_devices = ALS007_MIXER_DEVICES; devc->iomap = &als007_mix; devc->iomap_sz = ARRAY_SIZE(als007_mix); } break; default: printk(KERN_WARNING "sb_mixer: Unsupported mixer type %d\n", devc->model); return 0; } m = sound_alloc_mixerdev(); if (m == -1) return 0; mixer_devs[m] = kmalloc(sizeof(struct mixer_operations), GFP_KERNEL); if (mixer_devs[m] == NULL) { printk(KERN_ERR "sb_mixer: Can't allocate memory\n"); sound_unload_mixerdev(m); return 0; } if (devc->submodel != SUBMDL_ALS007) memcpy ((char *) mixer_devs[m], (char *) &sb_mixer_operations, sizeof (struct mixer_operations)); else memcpy ((char *) mixer_devs[m], (char *) &als007_mixer_operations, sizeof (struct mixer_operations)); mixer_devs[m]->devc = devc; if (owner) mixer_devs[m]->owner = owner; devc->my_mixerdev = m; sb_mixer_reset(devc); return 1; } void sb_mixer_unload(sb_devc *devc) { if (devc->my_mixerdev == -1) return; kfree(mixer_devs[devc->my_mixerdev]); sound_unload_mixerdev(devc->my_mixerdev); sbmixnum--; }
gpl-2.0
PoonKang/Kernel_SCH-I545
net/netfilter/nf_conntrack_timeout.c
5142
1645
/* * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org> * (C) 2012 by Vyatta Inc. <http://www.vyatta.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation (or any later at your option). */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/export.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_timeout.h> struct ctnl_timeout * (*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook); void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook); static struct nf_ct_ext_type timeout_extend __read_mostly = { .len = sizeof(struct nf_conn_timeout), .align = __alignof__(struct nf_conn_timeout), .id = NF_CT_EXT_TIMEOUT, }; int nf_conntrack_timeout_init(struct net *net) { int ret = 0; if (net_eq(net, &init_net)) { ret = nf_ct_extend_register(&timeout_extend); if (ret < 0) { printk(KERN_ERR "nf_ct_timeout: Unable to register " "timeout extension.\n"); return ret; } } return 0; } void nf_conntrack_timeout_fini(struct net *net) { if (net_eq(net, &init_net)) nf_ct_extend_unregister(&timeout_extend); }
gpl-2.0
aditheking/G7102_MM_SWA_Opensource
drivers/gpu/drm/mga/mga_drv.c
5398
4547
/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. 
(Rik) Faith <faith@valinux.com> * Gareth Hughes <gareth@valinux.com> */ #include <linux/module.h> #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" #include "drm_pciids.h" static int mga_driver_device_is_agp(struct drm_device *dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS }; static const struct file_operations mga_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = mga_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_mga_buf_priv_t), .load = mga_driver_load, .unload = mga_driver_unload, .lastclose = mga_driver_lastclose, .dma_quiescent = mga_driver_dma_quiescent, .device_is_agp = mga_driver_device_is_agp, .get_vblank_counter = mga_get_vblank_counter, .enable_vblank = mga_enable_vblank, .disable_vblank = mga_disable_vblank, .irq_preinstall = mga_driver_irq_preinstall, .irq_postinstall = mga_driver_irq_postinstall, .irq_uninstall = mga_driver_irq_uninstall, .irq_handler = mga_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, .fops = &mga_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver mga_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, }; static int __init mga_init(void) { driver.num_ioctls = mga_max_ioctl; return drm_pci_init(&driver, &mga_pci_driver); } static void __exit mga_exit(void) { drm_pci_exit(&driver, &mga_pci_driver); } module_init(mga_init); module_exit(mga_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); 
/**
 * Determine whether the device is a real AGP part.
 *
 * In addition to the usual tests performed by \c drm_device_is_agp, this
 * function detects PCI G450 cards that appear to the system exactly like
 * AGP G450 cards.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
 */
static int mga_driver_device_is_agp(struct drm_device *dev)
{
	const struct pci_dev *const pdev = dev->pdev;
	const struct pci_dev *bridge;

	/* Only the G450 (device ID 0x0525) exists in both PCI and AGP
	 * flavours; any other supported chip is genuinely AGP. */
	if (pdev->device != 0x0525)
		return 2;

	/* The PCI variant sits behind an extra PCI-to-PCI bridge: vendor
	 * 0x3388 (Hint Corp), device 0x0021 (HB6 Universal PCI-PCI bridge).
	 * Such cards are not supported by this driver, so report them as
	 * plain PCI. */
	bridge = pdev->bus->self;
	if (bridge && bridge->vendor == 0x3388 && bridge->device == 0x0021)
		return 0;

	return 2;
}
gpl-2.0
eaglerazor/android_kernel_samsung_apollo
arch/m68k/platform/532x/gpio.c
7446
10512
/*
 * Coldfire generic GPIO support
 *
 * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>

/*
 * One entry per GPIO port of the MCF532x.  Each entry pairs the generic
 * gpiolib callbacks with the port's register addresses:
 *   .pddr - data direction, .podr - output data, .ppdr - pin data (read),
 *   .setr/.clrr - single-cycle set/clear registers (ports that have them
 *   use mcf_gpio_set_value_fast; the edge-port "PIRQ" bank does not).
 * .base assigns each port a fixed slice of the global GPIO number space.
 */
static struct mcf_gpio_chip mcf_gpio_chips[] = {
	{
		/* Edge-port interrupt pins: no set/clear registers. */
		.gpio_chip = {
			.label = "PIRQ",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFEPORT_EPDDR,
		.podr = (void __iomem *) MCFEPORT_EPDR,
		.ppdr = (void __iomem *) MCFEPORT_EPPDR,
	},
	{
		.gpio_chip = {
			.label = "FECH",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 8,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_FECH,
		.podr = (void __iomem *) MCFGPIO_PODR_FECH,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_FECH,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_FECH,
	},
	{
		.gpio_chip = {
			.label = "FECL",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 16,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_FECL,
		.podr = (void __iomem *) MCFGPIO_PODR_FECL,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_FECL,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_FECL,
	},
	{
		.gpio_chip = {
			.label = "SSI",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 24,
			.ngpio = 5,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_SSI,
		.podr = (void __iomem *) MCFGPIO_PODR_SSI,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_SSI,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_SSI,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_SSI,
	},
	{
		.gpio_chip = {
			.label = "BUSCTL",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 32,
			.ngpio = 4,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL,
		.podr = (void __iomem *) MCFGPIO_PODR_BUSCTL,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
	},
	{
		.gpio_chip = {
			.label = "BE",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 40,
			.ngpio = 4,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_BE,
		.podr = (void __iomem *) MCFGPIO_PODR_BE,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_BE,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_BE,
	},
	{
		/* NOTE(review): bases are not contiguous here (40+4 -> 49);
		 * presumably mirrors the hardware pin map -- confirm against
		 * the MCF532x reference manual. */
		.gpio_chip = {
			.label = "CS",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 49,
			.ngpio = 5,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_CS,
		.podr = (void __iomem *) MCFGPIO_PODR_CS,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_CS,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_CS,
	},
	{
		.gpio_chip = {
			.label = "PWM",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 58,
			.ngpio = 4,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_PWM,
		.podr = (void __iomem *) MCFGPIO_PODR_PWM,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_PWM,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_PWM,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_PWM,
	},
	{
		.gpio_chip = {
			.label = "FECI2C",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 64,
			.ngpio = 4,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C,
		.podr = (void __iomem *) MCFGPIO_PODR_FECI2C,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C,
	},
	{
		.gpio_chip = {
			.label = "UART",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 72,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_UART,
		.podr = (void __iomem *) MCFGPIO_PODR_UART,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_UART,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_UART,
	},
	{
		.gpio_chip = {
			.label = "QSPI",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 80,
			.ngpio = 6,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_QSPI,
		.podr = (void __iomem *) MCFGPIO_PODR_QSPI,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI,
	},
	{
		.gpio_chip = {
			.label = "TIMER",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 88,
			.ngpio = 4,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_TIMER,
		.podr = (void __iomem *) MCFGPIO_PODR_TIMER,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER,
	},
	{
		.gpio_chip = {
			.label = "LCDDATAH",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 96,
			.ngpio = 2,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAH,
		.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAH,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAH,
	},
	{
		.gpio_chip = {
			.label = "LCDDATAM",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 104,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAM,
		.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAM,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAM,
	},
	{
		.gpio_chip = {
			.label = "LCDDATAL",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 112,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAL,
		.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAL,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAL,
	},
	{
		.gpio_chip = {
			.label = "LCDCTLH",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 120,
			.ngpio = 1,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLH,
		.podr = (void __iomem *) MCFGPIO_PODR_LCDCTLH,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLH,
	},
	{
		.gpio_chip = {
			.label = "LCDCTLL",
			.request = mcf_gpio_request,
			.free = mcf_gpio_free,
			.direction_input = mcf_gpio_direction_input,
			.direction_output = mcf_gpio_direction_output,
			.get = mcf_gpio_get_value,
			.set = mcf_gpio_set_value_fast,
			.base = 128,
			.ngpio = 8,
		},
		.pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLL,
		.podr = (void __iomem *) MCFGPIO_PODR_LCDCTLL,
		.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
		.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
		.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLL,
	},
};

/*
 * Register every port with gpiolib at boot.  Return values of
 * gpiochip_add() are deliberately ignored (cast to void).
 * The cast of &mcf_gpio_chips[i] to struct gpio_chip * presumably relies
 * on gpio_chip being the first member of struct mcf_gpio_chip -- the
 * struct declaration is in mcfgpio.h; confirm there.
 */
static int __init mcf_gpio_init(void)
{
	unsigned i = 0;
	while (i < ARRAY_SIZE(mcf_gpio_chips))
		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
	return 0;
}

core_initcall(mcf_gpio_init);
gpl-2.0
lozohcum/kernel
lib/raid6/sse2.c
8470
8539
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 * Each gen_syndrome variant computes, for `disks` drives of `bytes` bytes:
 *   P = xor of all data blocks (parity), written to dptr[disks-2]
 *   Q = Reed-Solomon syndrome over GF(2^8), written to dptr[disks-1]
 * Q is built Horner-style: Q = ((...(D_z0)*x + D_{z0-1})*x + ...) where
 * "*x" is GF(2^8) multiply-by-2.  The recurring asm sequence
 *   pcmpgtb / paddb / pand x1d / pxor
 * is that multiply-by-2: shift every byte left by one and conditionally
 * xor in 0x1d (the low bits of the generator polynomial 0x11d) for bytes
 * whose top bit was set.
 */

#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)

#include <linux/raid/pq.h>
#include "x86.h"

/* 16 bytes of 0x1d, kept 16-byte aligned so movdqa can load it. */
static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};

/* Runtime CPU-feature gate for all SSE2 variants in this file. */
static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}

/*
 * Plain SSE2 implementation
 *
 * One 16-byte column per iteration.  Register roles:
 *   xmm0 = 0x1d mask, xmm2 = P, xmm4 = Q, xmm5 = zero/temp,
 *   xmm6 = next data block (software-pipelined load).
 */
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			/* Q *= 2 in GF(2^8) ... */
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm5,%xmm5");
			/* ... then fold the current data block into P and Q
			 * and start loading the next one. */
			asm volatile("pxor %xmm6,%xmm2");
			asm volatile("pxor %xmm6,%xmm4");
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		/* Epilogue: one last multiply/fold for the final block. */
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");
		/* Non-temporal stores: results bypass the cache. */
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory"); /* order the movntdq stores */
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};

/*
 * Unrolled-by-2 SSE2 implementation
 *
 * Two 16-byte columns per iteration; the zero-temp registers (xmm5/xmm7)
 * double as data-load registers, so they are re-zeroed each round.
 */
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			/* Q *= 2 for both columns ... */
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			/* ... load the data block into the temps and fold it
			 * into both P and Q, then re-zero the temps. */
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};

#endif

#if defined(__x86_64__) && !defined(__arch_um__)

/*
 * Unrolled-by-4 SSE2 implementation
 *
 * 64-bit only: uses xmm8-xmm15.  P/Q accumulators start at zero and are
 * re-zeroed after each non-temporal store, so the inner loop can run over
 * all disks (z0..0) with no prologue.
 */
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4");	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6");	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12");	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14");	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			/* Q *= 2 for all four columns ... */
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			/* ... fold the data block into P and Q and re-zero
			 * the temps for the next round. */
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		/* Store results and reset every accumulator for the next
		 * 64-byte stripe. */
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif
gpl-2.0
scto/android_kernel_jide_sk1wg
drivers/parport/share.c
10518
29890
/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */

#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime =  DEFAULT_SPIN_TIME;

/* Ports that have been announced to device drivers; guarded by
 * parportlist_lock (and registration_lock for add/remove vs. driver
 * attach/detach ordering). */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Registered parport_driver structures; traversed only under
 * registration_lock. */
static LIST_HEAD(drivers);

static DEFINE_MUTEX(registration_lock);

/* What you can do to a port that's gone away: a full set of no-op
 * operations swapped in by parport_remove_port() so that late callers
 * fail harmlessly instead of dereferencing a dead driver. */
static void dead_write_lines (struct parport *p, unsigned char b){}
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg (struct parport *p){}
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data = dead_write,	/* compat */
	.nibble_read_data  = dead_read,		/* nibble */
	.byte_read_data    = dead_read,		/* byte */

	.owner		= NULL,
};

/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);
}

/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;
	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach (port);
}

/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}

/**
 *	parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *	Returns 0 on success.  Currently it always succeeds.
 **/
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* No ports yet: try to pull in a lowlevel driver via kmod. */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	mutex_lock(&registration_lock);
	/* Tell the new driver about every port already announced. */
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}

/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/
void parport_unregister_driver (struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	list_del_init(&drv->list);
	/* Balance every earlier attach() with a detach(). */
	list_for_each_entry(port, &portlist, list)
		drv->detach(port);
	mutex_unlock(&registration_lock);
}

/* Final teardown of a port once its refcount hits zero: drop it from the
 * global number-ordered list and free the probe strings, the name and the
 * structure itself. */
static void free_port (struct parport *port)
{
	int d;
	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}

/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/
struct parport *parport_get_port (struct parport *port)
{
	atomic_inc (&port->ref_count);
	return port;
}

/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed.
 **/
void parport_put_port (struct parport *port)
{
	if (atomic_dec_and_test (&port->ref_count))
		/* Can destroy it now. */
		free_port (port);

	return;
}

/**
 *	parport_register_port - register a parallel port
 *	@base: base I/O address
 *	@irq: IRQ line
 *	@dma: DMA channel
 *	@ops: pointer to the port driver's port operations structure
 *
 *	When a parallel port (lowlevel) driver finds a port that
 *	should be made available to parallel port device drivers, it
 *	should call parport_register_port().  The @base, @irq, and
 *	@dma parameters are for the convenience of port drivers, and
 *	for ports where they aren't meaningful needn't be set to
 *	anything special.  They can be altered afterwards by adjusting
 *	the relevant members of the parport structure that is returned
 *	and represents the port.  They should not be tampered with
 *	after calling parport_announce_port, however.
 *
 *	If there are parallel port device drivers in the system that
 *	have registered themselves using parport_register_driver(),
 *	they are not told about the port at this time; that is done by
 *	parport_announce_port().
 *
 *	The @ops structure is allocated by the caller, and must not be
 *	deallocated before calling parport_remove_port().
 *
 *	If there is no memory to allocate a new parport structure,
 *	this function will return %NULL.
 **/
struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops)
{
	struct list_head *l;
	struct parport *tmp;
	int num;
	int device;
	char *name;

	tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_WARNING "parport: memory squeeze\n");
		return NULL;
	}

	/* Init our structure */
	memset(tmp, 0, sizeof(struct parport));
	tmp->base = base;
	tmp->irq = irq;
	tmp->dma = dma;
	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
	tmp->modes = 0;
	INIT_LIST_HEAD(&tmp->list);
	tmp->devices = tmp->cad = NULL;
	tmp->flags = 0;
	tmp->ops = ops;
	tmp->physport = tmp;
	memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
	rwlock_init(&tmp->cad_lock);
	spin_lock_init(&tmp->waitlist_lock);
	spin_lock_init(&tmp->pardevice_lock);
	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	sema_init(&tmp->ieee1284.irq, 0);
	tmp->spintime = parport_default_spintime;
	atomic_set (&tmp->ref_count, 1);
	INIT_LIST_HEAD(&tmp->full_list);

	/* 15 bytes: "parport" + up to 7 digits + NUL. */
	name = kmalloc(15, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "parport: memory squeeze\n");
		kfree(tmp);
		return NULL;
	}
	/* Search for the lowest free parport number. */

	spin_lock(&full_list_lock);
	for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
		struct parport *p = list_entry(l, struct parport, full_list);
		if (p->number != num)
			break;	/* first gap in the sorted numbering */
	}
	tmp->portnum = tmp->number = num;
	list_add_tail(&tmp->full_list, l);
	spin_unlock(&full_list_lock);

	/*
	 * Now that the portnum is known finish doing the Init.
	 */
	/* (The embedded assignment re-stores the number; portnum and
	 * number are kept equal here.) */
	sprintf(name, "parport%d", tmp->portnum = tmp->number);
	tmp->name = name;

	for (device = 0; device < 5; device++)
		/* assume the worst */
		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

	tmp->waithead = tmp->waittail = NULL;

	return tmp;
}

/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* Mux slaves (up to two) are announced alongside the master. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}

/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Late callers hit harmless no-ops from now on. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the references held on any mux slaves; the caller still
	 * owns the master's reference. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}

/**
 *	parport_register_device - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@pf: preemption callback
 *	@kf: kick callback (wake-up)
 *	@irq_func: interrupt handler
 *	@flags: registration flags
 *	@handle: data for callback functions
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The @name is allocated by the caller and must not be
 *	deallocated until the caller calls @parport_unregister_device
 *	for that device.
 *
 *	The preemption callback function, @pf, is called when this
 *	device driver has claimed access to the port but another
 *	device driver wants to use it.  It is given @handle as its
 *	parameter, and should return zero if it is willing for the
 *	system to release the port to another driver on its behalf.
* If it wants to keep control of the port it should return * non-zero, and no action will be taken. It is good manners for * the driver to try to release the port at the earliest * opportunity after its preemption callback rejects a preemption * attempt. Note that if a preemption callback is happy for * preemption to go ahead, there is no need to release the port; * it is done automatically. This function may not block, as it * may be called from interrupt context. If the device driver * does not support preemption, @pf can be %NULL. * * The wake-up ("kick") callback function, @kf, is called when * the port is available to be claimed for exclusive access; that * is, parport_claim() is guaranteed to succeed when called from * inside the wake-up callback function. If the driver wants to * claim the port it should do so; otherwise, it need not take * any action. This function may not block, as it may be called * from interrupt context. If the device driver does not want to * be explicitly invited to claim the port in this way, @kf can * be %NULL. * * The interrupt handler, @irq_func, is called when an interrupt * arrives from the parallel port. Note that if a device driver * wants to use interrupts it should use parport_enable_irq(), * and can also check the irq member of the parport structure * representing the port. * * The parallel port (lowlevel) driver is the one that has called * request_irq() and whose interrupt handler is called first. * This handler does whatever needs to be done to the hardware to * acknowledge the interrupt (for PC-style ports there is nothing * special to be done). It then tells the IEEE 1284 code about * the interrupt, which may involve reacting to an IEEE 1284 * event depending on the current IEEE 1284 phase. After this, * it calls @irq_func. Needless to say, @irq_func will be called * from interrupt context, and may not block. 
* * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and * so should only be used when sharing the port with other device * drivers is impossible and would lead to incorrect behaviour. * Use it sparingly! Normally, @flags will be zero. * * This function returns a pointer to a structure that represents * the device on the port, or %NULL if there is not enough memory * to allocate space for that structure. **/ struct pardevice * parport_register_device(struct parport *port, const char *name, int (*pf)(void *), void (*kf)(void *), void (*irq_func)(void *), int flags, void *handle) { struct pardevice *tmp; if (port->physport->flags & PARPORT_FLAG_EXCL) { /* An exclusive device is registered. */ printk (KERN_DEBUG "%s: no more devices allowed\n", port->name); return NULL; } if (flags & PARPORT_DEV_LURK) { if (!pf || !kf) { printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name); return NULL; } } /* We up our own module reference count, and that of the port on which a device is to be registered, to ensure that neither of us gets unloaded while we sleep in (e.g.) kmalloc. */ if (!try_module_get(port->ops->owner)) { return NULL; } parport_get_port (port); tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL); if (tmp == NULL) { printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); goto out; } tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL); if (tmp->state == NULL) { printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); goto out_free_pardevice; } tmp->name = name; tmp->port = port; tmp->daisy = -1; tmp->preempt = pf; tmp->wakeup = kf; tmp->private = handle; tmp->flags = flags; tmp->irq_func = irq_func; tmp->waiting = 0; tmp->timeout = 5 * HZ; /* Chain this onto the list */ tmp->prev = NULL; /* * This function must not run from an irq handler so we don' t need * to clear irq on the local CPU. 
-arca */ spin_lock(&port->physport->pardevice_lock); if (flags & PARPORT_DEV_EXCL) { if (port->physport->devices) { spin_unlock (&port->physport->pardevice_lock); printk (KERN_DEBUG "%s: cannot grant exclusive access for " "device %s\n", port->name, name); goto out_free_all; } port->flags |= PARPORT_FLAG_EXCL; } tmp->next = port->physport->devices; wmb(); /* Make sure that tmp->next is written before it's added to the list; see comments marked 'no locking required' */ if (port->physport->devices) port->physport->devices->prev = tmp; port->physport->devices = tmp; spin_unlock(&port->physport->pardevice_lock); init_waitqueue_head(&tmp->wait_q); tmp->timeslice = parport_default_timeslice; tmp->waitnext = tmp->waitprev = NULL; /* * This has to be run as last thing since init_state may need other * pardevice fields. -arca */ port->ops->init_state(tmp, tmp->state); if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { port->proc_device = tmp; parport_device_proc_register(tmp); } return tmp; out_free_all: kfree(tmp->state); out_free_pardevice: kfree(tmp); out: parport_put_port (port); module_put(port->ops->owner); return NULL; } /** * parport_unregister_device - deregister a device on a parallel port * @dev: pointer to structure representing device * * This undoes the effect of parport_register_device(). 
**/ void parport_unregister_device(struct pardevice *dev) { struct parport *port; #ifdef PARPORT_PARANOID if (dev == NULL) { printk(KERN_ERR "parport_unregister_device: passed NULL\n"); return; } #endif port = dev->port->physport; if (port->proc_device == dev) { port->proc_device = NULL; clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags); parport_device_proc_unregister(dev); } if (port->cad == dev) { printk(KERN_DEBUG "%s: %s forgot to release port\n", port->name, dev->name); parport_release (dev); } spin_lock(&port->pardevice_lock); if (dev->next) dev->next->prev = dev->prev; if (dev->prev) dev->prev->next = dev->next; else port->devices = dev->next; if (dev->flags & PARPORT_DEV_EXCL) port->flags &= ~PARPORT_FLAG_EXCL; spin_unlock(&port->pardevice_lock); /* Make sure we haven't left any pointers around in the wait * list. */ spin_lock_irq(&port->waitlist_lock); if (dev->waitprev || dev->waitnext || port->waithead == dev) { if (dev->waitprev) dev->waitprev->waitnext = dev->waitnext; else port->waithead = dev->waitnext; if (dev->waitnext) dev->waitnext->waitprev = dev->waitprev; else port->waittail = dev->waitprev; } spin_unlock_irq(&port->waitlist_lock); kfree(dev->state); kfree(dev); module_put(port->ops->owner); parport_put_port (port); } /** * parport_find_number - find a parallel port by number * @number: parallel port number * * This returns the parallel port with the specified number, or * %NULL if there is none. * * There is an implicit parport_get_port() done already; to throw * away the reference to the port that parport_find_number() * gives you, use parport_put_port(). 
*/ struct parport *parport_find_number (int number) { struct parport *port, *result = NULL; if (list_empty(&portlist)) get_lowlevel_driver (); spin_lock (&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->number == number) { result = parport_get_port (port); break; } } spin_unlock (&parportlist_lock); return result; } /** * parport_find_base - find a parallel port by base address * @base: base I/O address * * This returns the parallel port with the specified base * address, or %NULL if there is none. * * There is an implicit parport_get_port() done already; to throw * away the reference to the port that parport_find_base() * gives you, use parport_put_port(). */ struct parport *parport_find_base (unsigned long base) { struct parport *port, *result = NULL; if (list_empty(&portlist)) get_lowlevel_driver (); spin_lock (&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->base == base) { result = parport_get_port (port); break; } } spin_unlock (&parportlist_lock); return result; } /** * parport_claim - claim access to a parallel port device * @dev: pointer to structure representing a device on the port * * This function will not block and so can be used from interrupt * context. If parport_claim() succeeds in claiming access to * the port it returns zero and the port is available to use. It * may fail (returning non-zero) if the port is in use by another * driver and that driver is not willing to relinquish control of * the port. 
**/ int parport_claim(struct pardevice *dev) { struct pardevice *oldcad; struct parport *port = dev->port->physport; unsigned long flags; if (port->cad == dev) { printk(KERN_INFO "%s: %s already owner\n", dev->port->name,dev->name); return 0; } /* Preempt any current device */ write_lock_irqsave (&port->cad_lock, flags); if ((oldcad = port->cad) != NULL) { if (oldcad->preempt) { if (oldcad->preempt(oldcad->private)) goto blocked; port->ops->save_state(port, dev->state); } else goto blocked; if (port->cad != oldcad) { /* I think we'll actually deadlock rather than get here, but just in case.. */ printk(KERN_WARNING "%s: %s released port when preempted!\n", port->name, oldcad->name); if (port->cad) goto blocked; } } /* Can't fail from now on, so mark ourselves as no longer waiting. */ if (dev->waiting & 1) { dev->waiting = 0; /* Take ourselves out of the wait list again. */ spin_lock_irq (&port->waitlist_lock); if (dev->waitprev) dev->waitprev->waitnext = dev->waitnext; else port->waithead = dev->waitnext; if (dev->waitnext) dev->waitnext->waitprev = dev->waitprev; else port->waittail = dev->waitprev; spin_unlock_irq (&port->waitlist_lock); dev->waitprev = dev->waitnext = NULL; } /* Now we do the change of devices */ port->cad = dev; #ifdef CONFIG_PARPORT_1284 /* If it's a mux port, select it. */ if (dev->port->muxport >= 0) { /* FIXME */ port->muxsel = dev->port->muxport; } /* If it's a daisy chain device, select it. */ if (dev->daisy >= 0) { /* This could be lazier. */ if (!parport_daisy_select (port, dev->daisy, IEEE1284_MODE_COMPAT)) port->daisy = dev->daisy; } #endif /* IEEE1284.3 support */ /* Restore control registers */ port->ops->restore_state(port, dev->state); write_unlock_irqrestore(&port->cad_lock, flags); dev->time = jiffies; return 0; blocked: /* If this is the first time we tried to claim the port, register an interest. This is only allowed for devices sleeping in parport_claim_or_block(), or those with a wakeup function. 
*/ /* The cad_lock is still held for writing here */ if (dev->waiting & 2 || dev->wakeup) { spin_lock (&port->waitlist_lock); if (test_and_set_bit(0, &dev->waiting) == 0) { /* First add ourselves to the end of the wait list. */ dev->waitnext = NULL; dev->waitprev = port->waittail; if (port->waittail) { port->waittail->waitnext = dev; port->waittail = dev; } else port->waithead = port->waittail = dev; } spin_unlock (&port->waitlist_lock); } write_unlock_irqrestore (&port->cad_lock, flags); return -EAGAIN; } /** * parport_claim_or_block - claim access to a parallel port device * @dev: pointer to structure representing a device on the port * * This behaves like parport_claim(), but will block if necessary * to wait for the port to be free. A return value of 1 * indicates that it slept; 0 means that it succeeded without * needing to sleep. A negative error code indicates failure. **/ int parport_claim_or_block(struct pardevice *dev) { int r; /* Signal to parport_claim() that we can wait even without a wakeup function. */ dev->waiting = 2; /* Try to claim the port. If this fails, we need to sleep. */ r = parport_claim(dev); if (r == -EAGAIN) { #ifdef PARPORT_DEBUG_SHARING printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name); #endif /* * FIXME!!! Use the proper locking for dev->waiting, * and make this use the "wait_event_interruptible()" * interfaces. The cli/sti that used to be here * did nothing. * * See also parport_release() */ /* If dev->waiting is clear now, an interrupt gave us the port and we would deadlock if we slept. 
*/ if (dev->waiting) { interruptible_sleep_on (&dev->wait_q); if (signal_pending (current)) { return -EINTR; } r = 1; } else { r = 0; #ifdef PARPORT_DEBUG_SHARING printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n", dev->name); #endif } #ifdef PARPORT_DEBUG_SHARING if (dev->port->physport->cad != dev) printk(KERN_DEBUG "%s: exiting parport_claim_or_block " "but %s owns port!\n", dev->name, dev->port->physport->cad ? dev->port->physport->cad->name:"nobody"); #endif } dev->waiting = 0; return r; } /** * parport_release - give up access to a parallel port device * @dev: pointer to structure representing parallel port device * * This function cannot fail, but it should not be called without * the port claimed. Similarly, if the port is already claimed * you should not try claiming it again. **/ void parport_release(struct pardevice *dev) { struct parport *port = dev->port->physport; struct pardevice *pd; unsigned long flags; /* Make sure that dev is the current device */ write_lock_irqsave(&port->cad_lock, flags); if (port->cad != dev) { write_unlock_irqrestore (&port->cad_lock, flags); printk(KERN_WARNING "%s: %s tried to release parport " "when not owner\n", port->name, dev->name); return; } #ifdef CONFIG_PARPORT_1284 /* If this is on a mux port, deselect it. */ if (dev->port->muxport >= 0) { /* FIXME */ port->muxsel = -1; } /* If this is a daisy device, deselect it. */ if (dev->daisy >= 0) { parport_daisy_deselect_all (port); port->daisy = -1; } #endif port->cad = NULL; write_unlock_irqrestore(&port->cad_lock, flags); /* Save control registers */ port->ops->save_state(port, dev->state); /* If anybody is waiting, find out who's been there longest and then wake them up. (Note: no locking required) */ /* !!! 
LOCKING IS NEEDED HERE */ for (pd = port->waithead; pd; pd = pd->waitnext) { if (pd->waiting & 2) { /* sleeping in claim_or_block */ parport_claim(pd); if (waitqueue_active(&pd->wait_q)) wake_up_interruptible(&pd->wait_q); return; } else if (pd->wakeup) { pd->wakeup(pd->private); if (dev->port->cad) /* racy but no matter */ return; } else { printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name); } } /* Nobody was waiting, so walk the list to see if anyone is interested in being woken up. (Note: no locking required) */ /* !!! LOCKING IS NEEDED HERE */ for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { if (pd->wakeup && pd != dev) pd->wakeup(pd->private); } } irqreturn_t parport_irq_handler(int irq, void *dev_id) { struct parport *port = dev_id; parport_generic_irq(port); return IRQ_HANDLED; } /* Exported symbols for modules. */ EXPORT_SYMBOL(parport_claim); EXPORT_SYMBOL(parport_claim_or_block); EXPORT_SYMBOL(parport_release); EXPORT_SYMBOL(parport_register_port); EXPORT_SYMBOL(parport_announce_port); EXPORT_SYMBOL(parport_remove_port); EXPORT_SYMBOL(parport_register_driver); EXPORT_SYMBOL(parport_unregister_driver); EXPORT_SYMBOL(parport_register_device); EXPORT_SYMBOL(parport_unregister_device); EXPORT_SYMBOL(parport_get_port); EXPORT_SYMBOL(parport_put_port); EXPORT_SYMBOL(parport_find_number); EXPORT_SYMBOL(parport_find_base); EXPORT_SYMBOL(parport_irq_handler); MODULE_LICENSE("GPL");
gpl-2.0
StelixROM/android_kernel_lge_hammerhead
fs/nls/nls_iso8859-5.c
12566
10909
/* * linux/fs/nls/nls_iso8859-5.c * * Charset iso8859-5 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x00ad, 0x040e, 0x040f, /* 0xb0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 
0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xc0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xd0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xe0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, /* 0xf0*/ 0x2116, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045a, 0x045b, 0x045c, 0x00a7, 0x045e, 0x045f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 
/* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ }; static const unsigned char page04[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x00, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0xfe, 0xff, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, /* 0x10-0x17 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 
0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xa0-0xa7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xad, 0xfe, 0xff, /* 0xa8-0xaf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xb0-0xb7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 
0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xd0-0xd7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xf0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xf0-0xf7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xfd, 0xae, 0xaf, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 
1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-5", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_5(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_5(void) { unregister_nls(&table); } module_init(init_nls_iso8859_5) module_exit(exit_nls_iso8859_5) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
kumasento/linux
drivers/infiniband/hw/amso1100/c2_vq.c
12566
7714
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include <linux/spinlock.h> #include "c2_vq.h" #include "c2_provider.h" /* * Verbs Request Objects: * * VQ Request Objects are allocated by the kernel verbs handlers. * They contain a wait object, a refcnt, an atomic bool indicating that the * adapter has replied, and a copy of the verb reply work request. * A pointer to the VQ Request Object is passed down in the context * field of the work request message, and reflected back by the adapter * in the verbs reply message. 
The function handle_vq() in the interrupt * path will use this pointer to: * 1) append a copy of the verbs reply message * 2) mark that the reply is ready * 3) wake up the kernel verbs handler blocked awaiting the reply. * * * The kernel verbs handlers do a "get" to put a 2nd reference on the * VQ Request object. If the kernel verbs handler exits before the adapter * can respond, this extra reference will keep the VQ Request object around * until the adapter's reply can be processed. The reason we need this is * because a pointer to this object is stuffed into the context field of * the verbs work request message, and reflected back in the reply message. * It is used in the interrupt handler (handle_vq()) to wake up the appropriate * kernel verb handler that is blocked awaiting the verb reply. * So handle_vq() will do a "put" on the object when it's done accessing it. * NOTE: If we guarantee that the kernel verb handler will never bail before * getting the reply, then we don't need these refcnts. * * * VQ Request objects are freed by the kernel verbs handlers only * after the verb has been processed, or when the adapter fails and * does not reply. * * * Verbs Reply Buffers: * * VQ Reply bufs are local host memory copies of a * outstanding Verb Request reply * message. The are always allocated by the kernel verbs handlers, and _may_ be * freed by either the kernel verbs handler -or- the interrupt handler. The * kernel verbs handler _must_ free the repbuf, then free the vq request object * in that order. */ int vq_init(struct c2_dev *c2dev) { sprintf(c2dev->vq_cache_name, "c2-vq:dev%c", (char) ('0' + c2dev->devnum)); c2dev->host_msg_cache = kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (c2dev->host_msg_cache == NULL) { return -ENOMEM; } return 0; } void vq_term(struct c2_dev *c2dev) { kmem_cache_destroy(c2dev->host_msg_cache); } /* vq_req_alloc - allocate a VQ Request Object and initialize it. 
* The refcnt is set to 1. */ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev) { struct c2_vq_req *r; r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL); if (r) { init_waitqueue_head(&r->wait_object); r->reply_msg = 0; r->event = 0; r->cm_id = NULL; r->qp = NULL; atomic_set(&r->refcnt, 1); atomic_set(&r->reply_ready, 0); } return r; } /* vq_req_free - free the VQ Request Object. It is assumed the verbs handler * has already free the VQ Reply Buffer if it existed. */ void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r) { r->reply_msg = 0; if (atomic_dec_and_test(&r->refcnt)) { kfree(r); } } /* vq_req_get - reference a VQ Request Object. Done * only in the kernel verbs handlers. */ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r) { atomic_inc(&r->refcnt); } /* vq_req_put - dereference and potentially free a VQ Request Object. * * This is only called by handle_vq() on the * interrupt when it is done processing * a verb reply message. If the associated * kernel verbs handler has already bailed, * then this put will actually free the VQ * Request object _and_ the VQ Reply Buffer * if it exists. */ void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) { if (atomic_dec_and_test(&r->refcnt)) { if (r->reply_msg != 0) vq_repbuf_free(c2dev, (void *) (unsigned long) r->reply_msg); kfree(r); } } /* * vq_repbuf_alloc - allocate a VQ Reply Buffer. */ void *vq_repbuf_alloc(struct c2_dev *c2dev) { return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC); } /* * vq_send_wr - post a verbs request message to the Verbs Request Queue. * If a message is not available in the MQ, then block until one is available. * NOTE: handle_mq() on the interrupt context will wake up threads blocked here. * When the adapter drains the Verbs Request Queue, * it inserts MQ index 0 in to the * adapter->host activity fifo and interrupts the host. 
*/ int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr) { void *msg; wait_queue_t __wait; /* * grab adapter vq lock */ spin_lock(&c2dev->vqlock); /* * allocate msg */ msg = c2_mq_alloc(&c2dev->req_vq); /* * If we cannot get a msg, then we'll wait * When a messages are available, the int handler will wake_up() * any waiters. */ while (msg == NULL) { pr_debug("%s:%d no available msg in VQ, waiting...\n", __func__, __LINE__); init_waitqueue_entry(&__wait, current); add_wait_queue(&c2dev->req_vq_wo, &__wait); spin_unlock(&c2dev->vqlock); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!c2_mq_full(&c2dev->req_vq)) { break; } if (!signal_pending(current)) { schedule_timeout(1 * HZ); /* 1 second... */ continue; } set_current_state(TASK_RUNNING); remove_wait_queue(&c2dev->req_vq_wo, &__wait); return -EINTR; } set_current_state(TASK_RUNNING); remove_wait_queue(&c2dev->req_vq_wo, &__wait); spin_lock(&c2dev->vqlock); msg = c2_mq_alloc(&c2dev->req_vq); } /* * copy wr into adapter msg */ memcpy(msg, wr, c2dev->req_vq.msg_size); /* * post msg */ c2_mq_produce(&c2dev->req_vq); /* * release adapter vq lock */ spin_unlock(&c2dev->vqlock); return 0; } /* * vq_wait_for_reply - block until the adapter posts a Verb Reply Message. */ int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req) { if (!wait_event_timeout(req->wait_object, atomic_read(&req->reply_ready), 60*HZ)) return -ETIMEDOUT; return 0; } /* * vq_repbuf_free - Free a Verbs Reply Buffer. */ void vq_repbuf_free(struct c2_dev *c2dev, void *reply) { kmem_cache_free(c2dev->host_msg_cache, reply); }
gpl-2.0
xiaognol/android_kernel_zte_nx503a-4.2
net/netfilter/xt_CONNSECMARK.c
13590
3666
/*
 * This module is used to copy security markings from packets
 * to connections, and restore security markings from connections
 * back to packets.  This would normally be performed in conjunction
 * with the SECMARK target and state match.
 *
 * Based somewhat on CONNMARK:
 *   Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
 *    by Henrik Nordstrom <hno@marasystems.com>
 *
 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CONNSECMARK.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark");
MODULE_ALIAS("ipt_CONNSECMARK");
MODULE_ALIAS("ip6t_CONNSECMARK");

/*
 * If the packet has a security mark and the connection does not, copy
 * the security mark from the packet to the connection.
 */
static void secmark_save(const struct sk_buff *skb)
{
	if (skb->secmark) {
		struct nf_conn *ct;
		enum ip_conntrack_info ctinfo;

		ct = nf_ct_get(skb, &ctinfo);
		if (ct && !ct->secmark) {
			ct->secmark = skb->secmark;
			/* Let conntrack userspace listeners see the change. */
			nf_conntrack_event_cache(IPCT_SECMARK, ct);
		}
	}
}

/*
 * If packet has no security mark, and the connection does, restore the
 * security mark from the connection to the packet.
 */
static void secmark_restore(struct sk_buff *skb)
{
	if (!skb->secmark) {
		const struct nf_conn *ct;
		enum ip_conntrack_info ctinfo;

		ct = nf_ct_get(skb, &ctinfo);
		if (ct && ct->secmark)
			skb->secmark = ct->secmark;
	}
}

/* Target entry point: dispatch on the configured SAVE/RESTORE mode. */
static unsigned int
connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_connsecmark_target_info *info = par->targinfo;

	switch (info->mode) {
	case CONNSECMARK_SAVE:
		secmark_save(skb);
		break;

	case CONNSECMARK_RESTORE:
		secmark_restore(skb);
		break;

	default:
		/* Mode was validated at checkentry time; anything else is a bug. */
		BUG();
	}

	return XT_CONTINUE;
}

/* Validate rule placement (table) and mode, then pin conntrack support. */
static int connsecmark_tg_check(const struct xt_tgchk_param *par)
{
	const struct xt_connsecmark_target_info *info = par->targinfo;
	int ret;

	if (strcmp(par->table, "mangle") != 0 &&
	    strcmp(par->table, "security") != 0) {
		pr_info("target only valid in the \'mangle\' "
			"or \'security\' tables, not \'%s\'.\n", par->table);
		return -EINVAL;
	}

	switch (info->mode) {
	case CONNSECMARK_SAVE:
	case CONNSECMARK_RESTORE:
		break;

	default:
		pr_info("invalid mode: %hu\n", info->mode);
		return -EINVAL;
	}

	ret = nf_ct_l3proto_try_module_get(par->family);
	if (ret < 0)
		pr_info("cannot load conntrack support for proto=%u\n",
			par->family);
	return ret;
}

/* Drop the conntrack module reference taken in checkentry. */
static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
{
	nf_ct_l3proto_module_put(par->family);
}

static struct xt_target connsecmark_tg_reg __read_mostly = {
	.name       = "CONNSECMARK",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connsecmark_tg_check,
	.destroy    = connsecmark_tg_destroy,
	.target     = connsecmark_tg,
	.targetsize = sizeof(struct xt_connsecmark_target_info),
	.me         = THIS_MODULE,
};

static int __init connsecmark_tg_init(void)
{
	return xt_register_target(&connsecmark_tg_reg);
}

static void __exit connsecmark_tg_exit(void)
{
	xt_unregister_target(&connsecmark_tg_reg);
}

module_init(connsecmark_tg_init);
module_exit(connsecmark_tg_exit);
gpl-2.0
xiaoleili/linux-mediatek
tools/perf/ui/tui/setup.c
279
3118
#include <signal.h>
#include <stdbool.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif

#include "../../util/cache.h"
#include "../../util/debug.h"
#include "../browser.h"
#include "../helpline.h"
#include "../ui.h"
#include "../util.h"
#include "../libslang.h"
#include "../keysyms.h"
#include "tui.h"

/* Set from the SIGWINCH handler; consumed by ui__refresh_dimensions(). */
static volatile int ui__need_resize;

extern struct perf_error_ops perf_tui_eops;
extern bool tui_helpline__set;

extern void hist_browser__init_hpp(void);

/*
 * Re-query the terminal size and reinitialize the SLang screen, either
 * unconditionally (force) or when a SIGWINCH was observed.
 */
void ui__refresh_dimensions(bool force)
{
	if (force || ui__need_resize) {
		ui__need_resize = 0;
		pthread_mutex_lock(&ui__lock);
		SLtt_get_screen_size();
		SLsmg_reinit_smg();
		pthread_mutex_unlock(&ui__lock);
	}
}

static void ui__sigwinch(int sig __maybe_unused)
{
	/* Just record the event; the redraw happens outside signal context. */
	ui__need_resize = 1;
}

/* Install the SIGWINCH handler exactly once. */
static void ui__setup_sigwinch(void)
{
	static bool done;

	if (done)
		return;

	done = true;
	pthread__unblock_sigwinch();
	signal(SIGWINCH, ui__sigwinch);
}

/*
 * Read one key with an optional timeout (in seconds; 0 = block forever).
 * Returns a key code, or K_TIMER / K_RESIZE / K_ERROR pseudo-keys.
 */
int ui__getch(int delay_secs)
{
	struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
	fd_set read_set;
	int err, key;

	ui__setup_sigwinch();

	FD_ZERO(&read_set);
	FD_SET(0, &read_set);

	if (delay_secs) {
		timeout.tv_sec = delay_secs;
		timeout.tv_usec = 0;
	}

	err = select(1, &read_set, NULL, NULL, ptimeout);

	if (err == 0)
		return K_TIMER;

	if (err == -1) {
		/* EINTR here is most likely our SIGWINCH handler firing. */
		if (errno == EINTR)
			return K_RESIZE;
		return K_ERROR;
	}

	key = SLang_getkey();
	if (key != K_ESC)
		return key;

	/*
	 * ESC may be a lone Escape press or the start of an escape sequence
	 * (arrow keys etc.).  Wait briefly for a follow-up byte to decide.
	 */
	FD_ZERO(&read_set);
	FD_SET(0, &read_set);
	timeout.tv_sec = 0;
	timeout.tv_usec = 20;
	err = select(1, &read_set, NULL, NULL, &timeout);
	if (err == 0)
		return K_ESC;

	/* More bytes pending: push ESC back and decode the full sequence. */
	SLang_ungetkey(key);
	return SLkp_getkey();
}

#ifdef HAVE_BACKTRACE_SUPPORT
/* Fatal-signal handler: restore the terminal, then dump a backtrace. */
static void ui__signal_backtrace(int sig)
{
	void *stackdump[32];
	size_t size;

	ui__exit(false);
	psignal(sig, "perf");

	printf("-------- backtrace --------\n");
	size = backtrace(stackdump, ARRAY_SIZE(stackdump));
	backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);

	exit(0);
}
#else
# define ui__signal_backtrace  ui__signal
#endif

/* Fatal-signal handler: restore the terminal and exit. */
static void ui__signal(int sig)
{
	ui__exit(false);
	psignal(sig, "perf");
	exit(0);
}

/*
 * Initialize the SLang-based TUI: screen, tty, keypad, signal handlers
 * and the helpline/browser/progress helpers.  Returns 0 or a negative
 * SLang error code.
 */
int ui__init(void)
{
	int err;

	SLutf8_enable(-1);
	SLtt_get_terminfo();
	SLtt_get_screen_size();

	err = SLsmg_init_smg();
	if (err < 0)
		goto out;
	err = SLang_init_tty(-1, 0, 0);
	if (err < 0)
		goto out;
	err = SLkp_init();
	if (err < 0) {
		pr_err("TUI initialization failed.\n");
		goto out;
	}

	SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);

	signal(SIGSEGV, ui__signal_backtrace);
	signal(SIGFPE, ui__signal_backtrace);
	signal(SIGINT, ui__signal);
	signal(SIGQUIT, ui__signal);
	signal(SIGTERM, ui__signal);

	perf_error__register(&perf_tui_eops);

	ui_helpline__init();
	ui_browser__init();
	tui_progress__init();

	hist_browser__init_hpp();
out:
	return err;
}

/*
 * Tear the TUI down, optionally showing the last helpline message and
 * waiting for a keypress first, and restore the tty to its normal state.
 */
void ui__exit(bool wait_for_ok)
{
	if (wait_for_ok && tui_helpline__set)
		ui__question_window("Fatal Error",
				    ui_helpline__last_msg,
				    "Press any key...", 0);

	SLtt_set_cursor_visibility(1);
	SLsmg_refresh();
	SLsmg_reset_smg();
	SLang_reset_tty();

	perf_error__unregister(&perf_tui_eops);
}
gpl-2.0
gazoo74/linux
kernel/locking/test-ww_mutex.c
279
12550
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

/* Worker side of __test_mutex(): contend for mtx->mutex and report back. */
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

/*
 * Hold the mutex while a worker tries to take it; mutual exclusion holds
 * iff the worker cannot complete within TIMEOUT while we own the lock.
 */
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	/* Nonzero means the worker got the lock while we held it: failure. */
	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

/* Run __test_mutex() over every combination of the TEST_MTX_* flags. */
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

/* AA test: re-acquiring a held ww_mutex must fail (-EALREADY / trylock 0). */
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

/* Worker side of the ABBA test: lock B then A (opposite of the caller). */
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		/* Back off per the ww_mutex protocol and retry in order. */
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

/*
 * ABBA test: two contexts lock the same pair of mutexes in opposite order.
 * With resolve=true the -EDEADLK backoff must untangle it; with
 * resolve=false exactly one side must see -EDEADLK.
 */
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

/* One link of the N-thread lock cycle: hold A, then try to take B. */
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);

	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

/*
 * Build a ring of nthreads workers where each one's B mutex is the next
 * one's A mutex; the cycle must be resolved by the -EDEADLK backoff.
 */
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

/* Return a random permutation of 0..count-1, or NULL on allocation failure. */
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

/* Simulate work done while holding the locks. */
static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

/*
 * Stress worker: repeatedly acquire all locks in a fixed random order,
 * using the -EDEADLK backoff protocol, until the timeout expires.
 */
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			/* The contended lock is already held via lock_slow. */
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

/*
 * Stress worker that reorders its lock list whenever a deadlock backoff
 * occurs, exercising the lock_slow / list_move restart path.
 */
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			/* Roll back everything acquired so far. */
			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);

		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

/* Stress worker hammering a single randomly-chosen lock without a context. */
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

/*
 * Spawn nthreads stress workers over a shared array of nlocks mutexes,
 * cycling through the worker flavours selected by flags, and wait for
 * them all to finish.  Workers free their own struct stress.
 */
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
gpl-2.0
mirjak/linux-accecn
arch/arm/mach-vexpress/tc2_pm.c
279
7509
/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL		0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF		0x400
#define A7_CONF			0x500
#define SYS_INFO		0x700
#define SPC_BASE		0xb00

static void __iomem *scc;

#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

/* Number of CPUs actually populated in each cluster, read from SYS_INFO. */
static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* MCPM callback: arm the SPC so the given CPU can be powered/woken up. */
static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;
	ve_spc_set_resume_addr(cluster, cpu,
			       __pa_symbol(mcpm_entry_point));
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	return 0;
}

/* MCPM callback: cancel any pending powerdown for the whole cluster. */
static int tc2_pm_cluster_powerup(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= TC2_CLUSTERS)
		return -EINVAL;
	ve_spc_powerdown(cluster, false);
	return 0;
}

static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF to prevent wfi
	 * from completing execution behind power controller back
	 */
	gic_cpu_if_down(0);
}

static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, true);
	ve_spc_global_wakeup_irq(true);
}

/* Flush and disable this CPU's local (L1) cache before power off. */
static void tc2_pm_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}

static void tc2_pm_cluster_cache_disable(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 */
		asm volatile(
		"mcr p15, 1, %0, c15, c0, 3 \n\t"
		"isb \n\t"
		"dsb "
		: : "r" (0x400) );
	}

	v7_exit_coherency_flush(all);

	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

/* True if the power controller currently holds this core in reset. */
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		   RESET_A7_NCORERESET(cpu)
		 : RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
			 __func__, cpu, cluster,
			 readl_relaxed(scc + RESET_CTRL));

		/*
		 * We need the CPU to reach WFI, but the power
		 * controller may put the cluster in reset and
		 * power it off as soon as that happens, before
		 * we have a chance to see STANDBYWFI.
		 *
		 * So we need to check for both conditions:
		 */
		if (tc2_core_in_reset(cpu, cluster) ||
		    ve_spc_cpu_in_wfi(cpu, cluster))
			return 0; /* success: the CPU is halted */

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

/* Record where this CPU should resume after a suspend-to-WFI. */
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
	ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}

/* MCPM callback: CPU is back up; disarm its wakeup IRQ and resume address. */
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);
}

static void tc2_pm_cluster_is_up(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, false);
	ve_spc_global_wakeup_irq(false);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.cpu_powerup		= tc2_pm_cpu_powerup,
	.cluster_powerup	= tc2_pm_cluster_powerup,
	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.cpu_is_up		= tc2_pm_cpu_is_up,
	.cluster_is_up		= tc2_pm_cluster_is_up,
};

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 * Naked: runs with no stack, before C runtime state is available.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
" cmp r0, #1 \n"
" bxne lr \n"
" b cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	unsigned int mpidr, cpu, cluster;
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
			"arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bound!\n", __func__);
		return -EINVAL;
	}

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		/* test if we can (re)enable the CCI on our own */
		BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);
gpl-2.0
trifuse/ProjectP
dep/ACE_wrappers/ace/System_Time.cpp
535
3906
// $Id: System_Time.cpp 91286 2010-08-05 09:04:31Z johnnyw $

#include "ace/System_Time.h"
#include "ace/MMAP_Memory_Pool.h"
#include "ace/Malloc_T.h"
#include "ace/Null_Mutex.h"
#include "ace/Time_Value.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_NS_time.h"

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

// Construct the time-keeper, binding it to a shared-memory pool file
// through which a time-server Clerk can publish the clock delta.
ACE_System_Time::ACE_System_Time (const ACE_TCHAR *poolname)
  : shmem_ (0)
  , delta_time_ (0)
{
  ACE_TRACE ("ACE_System_Time::ACE_System_Time");

  // Only create a new unique filename for the memory pool file
  // if the user didn't supply one...
  if (poolname == 0)
    {
#if defined (ACE_DEFAULT_BACKING_STORE)
      // Create a temporary file.
      ACE_OS::strcpy (this->poolname_,
                      ACE_DEFAULT_BACKING_STORE);
#else /* ACE_DEFAULT_BACKING_STORE */
      if (ACE::get_temp_dir (this->poolname_,
                             MAXPATHLEN - 17) == -1) // -17 for ace-malloc-XXXXXX
        {
          ACE_ERROR ((LM_ERROR,
                      ACE_TEXT ("Temporary path too long, ")
                      ACE_TEXT ("defaulting to current directory\n")));
          this->poolname_[0] = 0;
        }

      // Add the filename to the end
      ACE_OS::strcat (this->poolname_, ACE_TEXT ("ace-malloc-XXXXXX"));
#endif /* ACE_DEFAULT_BACKING_STORE */
    }
  else
    ACE_OS::strsncpy (this->poolname_,
                      poolname,
                      (sizeof this->poolname_ / sizeof (ACE_TCHAR)));

  ACE_NEW (this->shmem_, ALLOCATOR (this->poolname_));
}

ACE_System_Time::~ACE_System_Time (void)
{
  ACE_TRACE ("ACE_System_Time::~ACE_System_Time");
  delete this->shmem_;
}

// Get the local system time as seconds since the epoch.
int
ACE_System_Time::get_local_system_time (time_t &time_out)
{
  ACE_TRACE ("ACE_System_Time::get_local_system_time");
  time_out = ACE_OS::time (0);
  return 0;
}

// Same, but expressed as an ACE_Time_Value (microseconds set to 0).
int
ACE_System_Time::get_local_system_time (ACE_Time_Value &time_out)
{
  ACE_TRACE ("ACE_System_Time::get_local_system_time");
  time_out.set (ACE_OS::time (0), 0);
  return 0;
}

// Get the system time of the central time server.
int
ACE_System_Time::get_master_system_time (time_t &time_out)
{
  ACE_TRACE ("ACE_System_Time::get_master_system_time");

  if (this->delta_time_ == 0)
    {
      // Try to find it
      void *temp = 0;
      if (this->shmem_->find (ACE_DEFAULT_TIME_SERVER_STR, temp) == -1)
        {
          // No time entry in shared memory (meaning no Clerk exists)
          // so return the local time of the host.
          return this->get_local_system_time (time_out);
        }
      else
        // Extract the delta time.
        this->delta_time_ = static_cast<long *> (temp);
    }

  time_t local_time;

  // If delta_time is positive, it means that the system clock is
  // ahead of our local clock so add delta to the local time to get an
  // approximation of the system time.  Else if delta time is negative,
  // it means that our local clock is ahead of the system clock, so
  // return the last local time stored (to avoid time conflicts).
  if (*this->delta_time_ >= 0 )
    {
      this->get_local_system_time (local_time);
      time_out = local_time + static_cast<ACE_UINT32> (*this->delta_time_);
    }
  else
    // Return the last local time.  Note that this is stored as the
    // second field in shared memory.
    time_out = *(this->delta_time_ + 1);

  return 0;
}

int
ACE_System_Time::get_master_system_time (ACE_Time_Value &time_out)
{
  ACE_TRACE ("ACE_System_Time::get_master_system_time");
  time_t to;
  if (this->get_master_system_time (to) == -1)
    return -1;
  time_out.sec (to);
  return 0;
}

// Synchronize local system time with the central time server using
// specified mode (currently unimplemented).
int
ACE_System_Time::sync_local_system_time (ACE_System_Time::Sync_Mode)
{
  ACE_TRACE ("ACE_System_Time::sync_local_system_time");
  ACE_NOTSUP_RETURN (-1);
}

ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
javelinanddart/shamu
drivers/ata/acard-ahci.c
2071
12906
/* * acard-ahci.c - ACard AHCI SATA support * * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2010 Red Hat, Inc. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * AHCI hardware documentation: * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/dmi.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> #include "ahci.h" #define DRV_NAME "acard-ahci" #define DRV_VERSION "1.0" /* Received FIS structure limited to 80h. 
*/ #define ACARD_AHCI_RX_FIS_SZ 128 enum { AHCI_PCI_BAR = 5, }; enum board_ids { board_acard_ahci, }; struct acard_sg { __le32 addr; __le32 addr_hi; __le32 reserved; __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ }; static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int acard_ahci_port_start(struct ata_port *ap); static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); #ifdef CONFIG_PM static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int acard_ahci_pci_device_resume(struct pci_dev *pdev); #endif static struct scsi_host_template acard_ahci_sht = { AHCI_SHT("acard-ahci"), }; static struct ata_port_operations acard_ops = { .inherits = &ahci_ops, .qc_prep = acard_ahci_qc_prep, .qc_fill_rtf = acard_ahci_qc_fill_rtf, .port_start = acard_ahci_port_start, }; #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) static const struct ata_port_info acard_ahci_port_info[] = { [board_acard_ahci] = { AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &acard_ops, }, }; static const struct pci_device_id acard_ahci_pci_tbl[] = { /* ACard */ { PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */ { } /* terminate list */ }; static struct pci_driver acard_ahci_pci_driver = { .name = DRV_NAME, .id_table = acard_ahci_pci_tbl, .probe = acard_ahci_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = acard_ahci_pci_device_suspend, .resume = acard_ahci_pci_device_resume, #endif }; #ifdef CONFIG_PM static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; if (mesg.event & PM_EVENT_SUSPEND && hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(&pdev->dev, "BIOS update required for 
suspend/resume\n"); return -EIO; } if (mesg.event & PM_EVENT_SLEEP) { /* AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. */ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ } return ata_pci_device_suspend(pdev, mesg); } static int acard_ahci_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_reset_controller(host); if (rc) return rc; ahci_init_controller(host); } ata_host_resume(host); return 0; } #endif static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) { int rc; if (using_dac && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return 0; } static void acard_ahci_pci_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); u16 cc; const char *scc_s; pci_read_config_word(pdev, 0x0a, &cc); if (cc == PCI_CLASS_STORAGE_IDE) scc_s = "IDE"; else if (cc == PCI_CLASS_STORAGE_SATA) scc_s = "SATA"; else if (cc == PCI_CLASS_STORAGE_RAID) scc_s = "RAID"; else scc_s = "unknown"; ahci_print_info(host, scc_s); } static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) { struct scatterlist *sg; struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; unsigned int si, last_si = 0; VPRINTK("ENTER\n"); /* * Next, the S/G 
list. */ for_each_sg(qc->sg, sg, qc->n_elem, si) { dma_addr_t addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); /* * ACard note: * We must set an end-of-table (EOT) bit, * and the segment cannot exceed 64k (0x10000) */ acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff); acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); acard_sg[si].size = cpu_to_le32(sg_len); last_si = si; } acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */ return si; } static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; int is_atapi = ata_is_atapi(qc->tf.protocol); void *cmd_tbl; u32 opts; const u32 cmd_fis_len = 5; /* five dwords */ unsigned int n_elem; /* * Fill in command table information. First, the header, * a SATA Register - Host to Device command FIS. */ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); if (is_atapi) { memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); } n_elem = 0; if (qc->flags & ATA_QCFLAG_DMAMAP) n_elem = acard_ahci_fill_sg(qc, cmd_tbl); /* * Fill in command slot information. * * ACard note: prd table length not filled in */ opts = cmd_fis_len | (qc->dev->link->pmp << 12); if (qc->tf.flags & ATA_TFLAG_WRITE) opts |= AHCI_CMD_WRITE; if (is_atapi) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->tag, opts); } static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) { struct ahci_port_priv *pp = qc->ap->private_data; u8 *rx_fis = pp->rx_fis; if (pp->fbs_enabled) rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ; /* * After a successful execution of an ATA PIO data-in command, * the device doesn't send D2H Reg FIS to update the TF and * the host should take TF and E_Status from the preceding PIO * Setup FIS. 
*/ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && !(qc->flags & ATA_QCFLAG_FAILED)) { ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; } else ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); return true; } static int acard_ahci_port_start(struct ata_port *ap) { struct ahci_host_priv *hpriv = ap->host->private_data; struct device *dev = ap->host->dev; struct ahci_port_priv *pp; void *mem; dma_addr_t mem_dma; size_t dma_sz, rx_fis_sz; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; /* check FBS capability */ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { void __iomem *port_mmio = ahci_port_base(ap); u32 cmd = readl(port_mmio + PORT_CMD); if (cmd & PORT_CMD_FBSCP) pp->fbs_supported = true; else if (hpriv->flags & AHCI_HFLAG_YES_FBS) { dev_info(dev, "port %d can do FBS, forcing FBSCP\n", ap->port_no); pp->fbs_supported = true; } else dev_warn(dev, "port %d is not capable of FBS\n", ap->port_no); } if (pp->fbs_supported) { dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16; } else { dma_sz = AHCI_PORT_PRIV_DMA_SZ; rx_fis_sz = ACARD_AHCI_RX_FIS_SZ; } mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); if (!mem) return -ENOMEM; memset(mem, 0, dma_sz); /* * First item in chunk of DMA memory: 32-slot command table, * 32 bytes each in size */ pp->cmd_slot = mem; pp->cmd_slot_dma = mem_dma; mem += AHCI_CMD_SLOT_SZ; mem_dma += AHCI_CMD_SLOT_SZ; /* * Second item: Received-FIS area */ pp->rx_fis = mem; pp->rx_fis_dma = mem_dma; mem += rx_fis_sz; mem_dma += rx_fis_sz; /* * Third item: data area for storing a single command * and its scatter-gather table */ pp->cmd_tbl = mem; pp->cmd_tbl_dma = mem_dma; /* * Save off initial list of interrupts to be enabled. 
* This could be changed later */ pp->intr_mask = DEF_PORT_IRQ; ap->private_data = pp; /* engage engines, captain */ return ahci_port_resume(ap); } static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_id = ent->driver_data; struct ata_port_info pi = acard_ahci_port_info[board_id]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; struct ata_host *host; int n_ports, i, rc; VPRINTK("ENTER\n"); WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); ata_print_version_once(&pdev->dev, DRV_VERSION); /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) return rc; /* AHCI controllers often implement SFF compatible interface. * Grab all PCI BARs just in case. */ rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; hpriv->flags |= (unsigned long)pi.private_data; if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) pci_enable_msi(pdev); hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; /* save initial config */ ahci_save_initial_config(&pdev->dev, hpriv, 0, 0); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; ahci_set_em_messages(hpriv, &pi); /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. 
*/ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); if (!host) return -ENOMEM; host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; else printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); ata_port_pbar_desc(ap, AHCI_PCI_BAR, 0x100 + ap->port_no * 0x80, "port"); /* set initial link pm policy */ /* ap->pm_policy = NOT_AVAILABLE; */ /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } /* initialize adapter */ rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); if (rc) return rc; rc = ahci_reset_controller(host); if (rc) return rc; ahci_init_controller(host); acard_ahci_pci_print_info(host); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, &acard_ahci_sht); } module_pci_driver(acard_ahci_pci_driver); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("ACard AHCI SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
dtsinc/DTS-Sound-Integration_CAF-Android-kernel
drivers/net/wireless/brcm80211/brcmsmac/channel.c
2327
21373
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <net/regulatory.h> #include <defs.h> #include "pub.h" #include "phy/phy_hal.h" #include "main.h" #include "stf.h" #include "channel.h" #include "mac80211_if.h" #include "debug.h" /* QDB() macro takes a dB value and converts to a quarter dB value */ #define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) #define LOCALE_MIMO_IDX_bn 0 #define LOCALE_MIMO_IDX_11n 0 /* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */ #define BRCMS_MAXPWR_MIMO_TBL_SIZE 14 /* maxpwr mapping to 5GHz band channels: * maxpwr[0] - channels [34-48] * maxpwr[1] - channels [52-60] * maxpwr[2] - channels [62-64] * maxpwr[3] - channels [100-140] * maxpwr[4] - channels [149-165] */ #define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */ #define LC(id) LOCALE_MIMO_IDX_ ## id #define LOCALES(mimo2, mimo5) \ {LC(mimo2), LC(mimo5)} /* macro to get 5 GHz channel group index for tx power */ #define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ (((c) < 62) ? 1 : \ (((c) < 100) ? 2 : \ (((c) < 149) ? 
3 : 4)))) #define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0) #define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_DFS | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_DFS | \ NL80211_RRF_NO_IBSS) #define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) static const struct ieee80211_regdomain brcms_regdom_x2 = { .n_reg_rules = 6, .alpha2 = "X2", .reg_rules = { BRCM_2GHZ_2412_2462, BRCM_2GHZ_2467_2472, BRCM_5GHZ_5180_5240, BRCM_5GHZ_5260_5320, BRCM_5GHZ_5500_5700, BRCM_5GHZ_5745_5825, } }; /* locale per-channel tx power limits for MIMO frames * maxpwr arrays are index by channel for 2.4 GHz limits, and * by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel) */ struct locale_mimo_info { /* tx 20 MHz power limits, qdBm units */ s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE]; /* tx 40 MHz power limits, qdBm units */ s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE]; }; /* Country names and abbreviations with locale defined from ISO 3166 */ struct country_info { const u8 locale_mimo_2G; /* 2.4G mimo info */ const u8 locale_mimo_5G; /* 5G mimo info */ }; struct brcms_regd { struct country_info country; const struct ieee80211_regdomain *regdomain; }; struct brcms_cm_info { struct brcms_pub *pub; struct brcms_c_info *wlc; const struct brcms_regd *world_regd; }; /* * MIMO Locale Definitions - 2.4 GHz */ static const struct locale_mimo_info locale_bn = { {QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13)}, {0, 0, QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), 
QDB(13), QDB(13), QDB(13), 0, 0}, }; static const struct locale_mimo_info *g_mimo_2g_table[] = { &locale_bn }; /* * MIMO Locale Definitions - 5 GHz */ static const struct locale_mimo_info locale_11n = { { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)}, {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)}, }; static const struct locale_mimo_info *g_mimo_5g_table[] = { &locale_11n }; static const struct brcms_regd cntry_locales[] = { /* Worldwide RoW 2, must always be at index 0 */ { .country = LOCALES(bn, 11n), .regdomain = &brcms_regdom_x2, }, }; static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) return NULL; return g_mimo_2g_table[locale_idx]; } static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx) { if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) return NULL; return g_mimo_5g_table[locale_idx]; } /* * Indicates whether the country provided is valid to pass * to cfg80211 or not. * * returns true if valid; false if not. */ static bool brcms_c_country_valid(const char *ccode) { /* * only allow ascii alpha uppercase for the first 2 * chars. 
*/ if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A && (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A)) return false; /* * do not match ISO 3166-1 user assigned country codes * that may be in the driver table */ if (!strcmp("AA", ccode) || /* AA */ !strcmp("ZZ", ccode) || /* ZZ */ ccode[0] == 'X' || /* XA - XZ */ (ccode[0] == 'Q' && /* QM - QZ */ (ccode[1] >= 'M' && ccode[1] <= 'Z'))) return false; if (!strcmp("NA", ccode)) return false; return true; } static const struct brcms_regd *brcms_world_regd(const char *regdom, int len) { const struct brcms_regd *regd = NULL; int i; for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) { if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) { regd = &cntry_locales[i]; break; } } return regd; } static const struct brcms_regd *brcms_default_world_regd(void) { return &cntry_locales[0]; } /* JP, J1 - J10 are Japan ccodes */ static bool brcms_c_japan_ccode(const char *ccode) { return (ccode[0] == 'J' && (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9'))); } static void brcms_c_channel_min_txpower_limits_with_local_constraint( struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr, u8 local_constraint_qdbm) { int j; /* CCK Rates */ for (j = 0; j < WL_TX_POWER_CCK_NUM; j++) txpwr->cck[j] = min(txpwr->cck[j], local_constraint_qdbm); /* 20 MHz Legacy OFDM SISO */ for (j = 0; j < WL_TX_POWER_OFDM_NUM; j++) txpwr->ofdm[j] = min(txpwr->ofdm[j], local_constraint_qdbm); /* 20 MHz Legacy OFDM CDD */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_cdd[j] = min(txpwr->ofdm_cdd[j], local_constraint_qdbm); /* 40 MHz Legacy OFDM SISO */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_40_siso[j] = min(txpwr->ofdm_40_siso[j], local_constraint_qdbm); /* 40 MHz Legacy OFDM CDD */ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) txpwr->ofdm_40_cdd[j] = min(txpwr->ofdm_40_cdd[j], local_constraint_qdbm); /* 20MHz MCS 0-7 SISO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) 
txpwr->mcs_20_siso[j] = min(txpwr->mcs_20_siso[j], local_constraint_qdbm); /* 20MHz MCS 0-7 CDD */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_20_cdd[j] = min(txpwr->mcs_20_cdd[j], local_constraint_qdbm); /* 20MHz MCS 0-7 STBC */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_20_stbc[j] = min(txpwr->mcs_20_stbc[j], local_constraint_qdbm); /* 20MHz MCS 8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_20_mimo[j] = min(txpwr->mcs_20_mimo[j], local_constraint_qdbm); /* 40MHz MCS 0-7 SISO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_siso[j] = min(txpwr->mcs_40_siso[j], local_constraint_qdbm); /* 40MHz MCS 0-7 CDD */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_cdd[j] = min(txpwr->mcs_40_cdd[j], local_constraint_qdbm); /* 40MHz MCS 0-7 STBC */ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) txpwr->mcs_40_stbc[j] = min(txpwr->mcs_40_stbc[j], local_constraint_qdbm); /* 40MHz MCS 8-15 MIMO */ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++) txpwr->mcs_40_mimo[j] = min(txpwr->mcs_40_mimo[j], local_constraint_qdbm); /* 40MHz MCS 32 */ txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm); } /* * set the driver's current country and regulatory information * using a country code as the source. Look up built in country * information found with the country code. 
*/ static void brcms_c_set_country(struct brcms_cm_info *wlc_cm, const struct brcms_regd *regd) { struct brcms_c_info *wlc = wlc_cm->wlc; if ((wlc->pub->_n_enab & SUPPORT_11N) != wlc->protection->nmode_user) brcms_c_set_nmode(wlc); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]); brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]); brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); return; } struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) { struct brcms_cm_info *wlc_cm; struct brcms_pub *pub = wlc->pub; struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom; const char *ccode = sprom->alpha2; int ccode_len = sizeof(sprom->alpha2); wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC); if (wlc_cm == NULL) return NULL; wlc_cm->pub = pub; wlc_cm->wlc = wlc; wlc->cmi = wlc_cm; /* store the country code for passing up as a regulatory hint */ wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len); if (brcms_c_country_valid(ccode)) strncpy(wlc->pub->srom_ccode, ccode, ccode_len); /* * If no custom world domain is found in the SROM, use the * default "X2" domain. 
*/ if (!wlc_cm->world_regd) { wlc_cm->world_regd = brcms_default_world_regd(); ccode = wlc_cm->world_regd->regdomain->alpha2; ccode_len = BRCM_CNTRY_BUF_SZ - 1; } /* save default country for exiting 11d regulatory mode */ strncpy(wlc->country_default, ccode, ccode_len); /* initialize autocountry_default to driver default */ strncpy(wlc->autocountry_default, ccode, ccode_len); brcms_c_set_country(wlc_cm, wlc_cm->world_regd); return wlc_cm; } void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm) { kfree(wlc_cm); } void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, u8 local_constraint_qdbm) { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; struct txpwr_limits txpwr; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); brcms_c_channel_min_txpower_limits_with_local_constraint( wlc_cm, &txpwr, local_constraint_qdbm ); /* set or restore gmode as required by regulatory */ if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); brcms_b_set_chanspec(wlc->hw, chanspec, !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN), &txpwr); } void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec, struct txpwr_limits *txpwr) { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; uint i; uint chan; int maxpwr; int delta; const struct country_info *country; struct brcms_band *band; int conducted_max = BRCMS_TXPWR_MAX; const struct locale_mimo_info *li_mimo; int maxpwr20, maxpwr40; int maxpwr_idx; uint j; memset(txpwr, 0, sizeof(struct txpwr_limits)); if (WARN_ON(!ch)) return; country = &wlc_cm->world_regd->country; chan = CHSPEC_CHANNEL(chanspec); band = wlc->bandstate[chspec_bandunit(chanspec)]; li_mimo = (band->bandtype == BRCM_BAND_5G) ? 
brcms_c_get_mimo_5g(country->locale_mimo_5G) : brcms_c_get_mimo_2g(country->locale_mimo_2G); delta = band->antgain; if (band->bandtype == BRCM_BAND_2G) conducted_max = QDB(22); maxpwr = QDB(ch->max_power) - delta; maxpwr = max(maxpwr, 0); maxpwr = min(maxpwr, conducted_max); /* CCK txpwr limits for 2.4G band */ if (band->bandtype == BRCM_BAND_2G) { for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) txpwr->cck[i] = (u8) maxpwr; } for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) { txpwr->ofdm[i] = (u8) maxpwr; /* * OFDM 40 MHz SISO has the same power as the corresponding * MCS0-7 rate unless overriden by the locale specific code. * We set this value to 0 as a flag (presumably 0 dBm isn't * a possibility) and then copy the MCS0-7 value to the 40 MHz * value if it wasn't explicitly set. */ txpwr->ofdm_40_siso[i] = 0; txpwr->ofdm_cdd[i] = (u8) maxpwr; txpwr->ofdm_40_cdd[i] = 0; } delta = 0; if (band->antgain > QDB(6)) delta = band->antgain - QDB(6); /* Excess over 6 dB */ if (band->bandtype == BRCM_BAND_2G) maxpwr_idx = (chan - 1); else maxpwr_idx = CHANNEL_POWER_IDX_5G(chan); maxpwr20 = li_mimo->maxpwr20[maxpwr_idx]; maxpwr40 = li_mimo->maxpwr40[maxpwr_idx]; maxpwr20 = maxpwr20 - delta; maxpwr20 = max(maxpwr20, 0); maxpwr40 = maxpwr40 - delta; maxpwr40 = max(maxpwr40, 0); /* Fill in the MCS 0-7 (SISO) rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { /* * 20 MHz has the same power as the corresponding OFDM rate * unless overriden by the locale specific code. 
*/ txpwr->mcs_20_siso[i] = txpwr->ofdm[i]; txpwr->mcs_40_siso[i] = 0; } /* Fill in the MCS 0-7 CDD rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_cdd[i] = (u8) maxpwr20; txpwr->mcs_40_cdd[i] = (u8) maxpwr40; } /* * These locales have SISO expressed in the * table and override CDD later */ if (li_mimo == &locale_bn) { if (li_mimo == &locale_bn) { maxpwr20 = QDB(16); maxpwr40 = 0; if (chan >= 3 && chan <= 11) maxpwr40 = QDB(16); } for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_siso[i] = (u8) maxpwr20; txpwr->mcs_40_siso[i] = (u8) maxpwr40; } } /* Fill in the MCS 0-7 STBC rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { txpwr->mcs_20_stbc[i] = 0; txpwr->mcs_40_stbc[i] = 0; } /* Fill in the MCS 8-15 SDM rates */ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) { txpwr->mcs_20_mimo[i] = (u8) maxpwr20; txpwr->mcs_40_mimo[i] = (u8) maxpwr40; } /* Fill in MCS32 */ txpwr->mcs32 = (u8) maxpwr40; for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_cdd[i] == 0) txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j]; } } /* * Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO * value if it wasn't provided explicitly. */ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_40_siso[i] == 0) txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i]; } for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) { if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; if (i == 0) { i = i + 1; if (txpwr->ofdm_40_siso[i] == 0) txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j]; } } /* * Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding * STBC values if they weren't provided explicitly. 
*/ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) { if (txpwr->mcs_20_stbc[i] == 0) txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i]; if (txpwr->mcs_40_stbc[i] == 0) txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i]; } return; } /* * Verify the chanspec is using a legal set of parameters, i.e. that the * chanspec specified a band, bw, ctl_sb and channel and that the * combination could be legal given any set of circumstances. * RETURNS: true is the chanspec is malformed, false if it looks good. */ static bool brcms_c_chspec_malformed(u16 chanspec) { /* must be 2G or 5G band */ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec)) return true; /* must be 20 or 40 bandwidth */ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec)) return true; /* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */ if (CHSPEC_IS20(chanspec)) { if (!CHSPEC_SB_NONE(chanspec)) return true; } else if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) { return true; } return false; } /* * Validate the chanspec for this locale, for 40MHZ we need to also * check that the sidebands are valid 20MZH channels in this locale * and they are also a legal HT combination */ static bool brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec) { struct brcms_c_info *wlc = wlc_cm->wlc; u8 channel = CHSPEC_CHANNEL(chspec); /* check the chanspec */ if (brcms_c_chspec_malformed(chspec)) { brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n", wlc->pub->unit, chspec); return false; } if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) != chspec_bandunit(chspec)) return false; return true; } bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec) { return brcms_c_valid_chanspec_ext(wlc_cm, chspec); } static bool brcms_is_radar_freq(u16 center_freq) { return center_freq >= 5260 && center_freq <= 5700; } static void brcms_reg_apply_radar_flags(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; int i; sband = 
wiphy->bands[IEEE80211_BAND_5GHZ]; if (!sband) return; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (!brcms_is_radar_freq(ch->center_freq)) continue; /* * All channels in this range should be passive and have * DFS enabled. */ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch->flags |= IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN; } } static void brcms_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *rule; int band, i; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (ch->flags & (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR)) continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { rule = freq_reg_info(wiphy, ch->center_freq); if (IS_ERR(rule)) continue; if (!(rule->flags & NL80211_RRF_NO_IBSS)) ch->flags &= ~IEEE80211_CHAN_NO_IBSS; if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN)) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } else if (ch->beacon_found) { ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN); } } } } static void brcms_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct brcms_info *wl = hw->priv; struct brcms_c_info *wlc = wl->wlc; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; int band, i; bool ch_found = false; brcms_reg_apply_radar_flags(wiphy); if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) brcms_reg_apply_beaconing_flags(wiphy, request->initiator); /* Disable radio if all channels disallowed by regulatory */ for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; !ch_found && i < sband->n_channels; i++) { ch = 
&sband->channels[i]; if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch_found = true; } } if (ch_found) { mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); } else { mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); brcms_err(wlc->hw->d11core, "wl%d: %s: no valid channel for \"%s\"\n", wlc->pub->unit, __func__, request->alpha2); } if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G) wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi, brcms_c_japan_ccode(request->alpha2)); } void brcms_c_regd_init(struct brcms_c_info *wlc) { struct wiphy *wiphy = wlc->wiphy; const struct brcms_regd *regd = wlc->cmi->world_regd; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct brcms_chanvec sup_chan; struct brcms_band *band; int band_idx, i; /* Disable any channels not supported by the phy */ for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) { band = wlc->bandstate[band_idx]; wlc_phy_chanspec_band_validch(band->pi, band->bandtype, &sup_chan); if (band_idx == BAND_2G_INDEX) sband = wiphy->bands[IEEE80211_BAND_2GHZ]; else sband = wiphy->bands[IEEE80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (!isset(sup_chan.vec, ch->hw_value)) ch->flags |= IEEE80211_CHAN_DISABLED; } } wlc->wiphy->reg_notifier = brcms_reg_notifier; wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_STRICT_REGULATORY; wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain); brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER); }
gpl-2.0
SciAps/android-dm3730-kernel
drivers/rtc/rtc-sa1100.c
3095
9714
/*
 * Real Time Clock interface for StrongARM SA1x00 and XScale PXA2xx
 *
 * Copyright (c) 2000 Nils Faerber
 *
 * Based on rtc.c by Paul Gortmaker
 *
 * Original Driver by Nils Faerber <nils@kernelconcepts.de>
 *
 * Modifications from:
 *   CIH <cih@coventive.com>
 *   Nicolas Pitre <nico@fluxnic.net>
 *   Andrew Christian <andrew.christian@hp.com>
 *
 * Converted to the RTC subsystem and Driver Model
 * by Richard Purdie <rpurdie@rpsys.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/bitops.h>

#include <mach/hardware.h>
#include <asm/irq.h>

#ifdef CONFIG_ARCH_PXA
#include <mach/regs-rtc.h>
#include <mach/regs-ost.h>
#endif

/* Default divider/trim for a 32.768 kHz crystal, yielding a 1 Hz clock. */
#define RTC_DEF_DIVIDER		(32768 - 1)
#define RTC_DEF_TRIM		0

static const unsigned long RTC_FREQ = 1024;
/* Last alarm time requested by userspace; wildcard fields mark it periodic. */
static struct rtc_time rtc_alarm;
static DEFINE_SPINLOCK(sa1100_rtc_lock);

/*
 * Return non-zero if any field of @tm is a wildcard (out of its valid
 * range, or -1 for the year), i.e. the alarm should repeat rather than
 * fire once.
 */
static inline int rtc_periodic_alarm(struct rtc_time *tm)
{
	return  (tm->tm_year == -1) ||
		((unsigned)tm->tm_mon >= 12) ||
		((unsigned)(tm->tm_mday - 1) >= 31) ||
		((unsigned)tm->tm_hour > 23) ||
		((unsigned)tm->tm_min > 59) ||
		((unsigned)tm->tm_sec > 59);
}

/*
 * Calculate the next alarm time given the requested alarm time mask
 * and the current time.
 *
 * The date portion of @next is taken from @now and the time-of-day from
 * @alrm; if that combination already lies in the past, the alarm is
 * pushed forward by one day.
 */
static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
	struct rtc_time *alrm)
{
	unsigned long next_time;
	unsigned long now_time;

	next->tm_year = now->tm_year;
	next->tm_mon = now->tm_mon;
	next->tm_mday = now->tm_mday;
	next->tm_hour = alrm->tm_hour;
	next->tm_min = alrm->tm_min;
	next->tm_sec = alrm->tm_sec;

	rtc_tm_to_time(now, &now_time);
	rtc_tm_to_time(next, &next_time);

	if (next_time < now_time) {
		/* Advance one day */
		next_time += 60 * 60 * 24;
		rtc_time_to_tm(next_time, next);
	}
}

/*
 * Program the hardware alarm register (RTAR) for the next occurrence of
 * @alrm.  The do/while loop re-reads RCNR and retries if the RTC counter
 * ticked while we were computing, so the programmed alarm can never be
 * based on a stale "now".  Returns 0 on success or the error from
 * rtc_tm_to_time().
 */
static int rtc_update_alarm(struct rtc_time *alrm)
{
	struct rtc_time alarm_tm, now_tm;
	unsigned long now, time;
	int ret;

	do {
		now = RCNR;
		rtc_time_to_tm(now, &now_tm);
		rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
		ret = rtc_tm_to_time(&alarm_tm, &time);
		if (ret != 0)
			break;

		/* Keep only the enable/status bits we manage, then arm. */
		RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
		RTAR = time;
	} while (now != RCNR);

	return ret;
}

/*
 * Shared handler for the 1 Hz tick and the alarm interrupt.  Decodes
 * RTSR, clears the pending sources, forwards the events to the RTC core
 * and, for a periodic (wildcarded) alarm, re-arms it for the next
 * occurrence.
 */
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = to_platform_device(dev_id);
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	unsigned int rtsr;
	unsigned long events = 0;

	spin_lock(&sa1100_rtc_lock);

	rtsr = RTSR;
	/* clear interrupt sources */
	RTSR = 0;
	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
	 * See also the comments in sa1100_rtc_probe(). */
	if (rtsr & (RTSR_ALE | RTSR_HZE)) {
		/* This is the original code, before there was the if test
		 * above. This code does not clear interrupts that were not
		 * enabled. */
		RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
	} else {
		/* For some reason, it is possible to enter this routine
		 * without interruptions enabled, it has been tested with
		 * several units (Bug in SA11xx chip?).
		 *
		 * This situation leads to an infinite "loop" of interrupt
		 * routine calling and as a result the processor seems to
		 * lock on its first call to open(). */
		RTSR = RTSR_AL | RTSR_HZ;
	}

	/* clear alarm interrupt if it has occurred */
	if (rtsr & RTSR_AL)
		rtsr &= ~RTSR_ALE;
	RTSR = rtsr & (RTSR_ALE | RTSR_HZE);

	/* update irq data & counter */
	if (rtsr & RTSR_AL)
		events |= RTC_AF | RTC_IRQF;
	if (rtsr & RTSR_HZ)
		events |= RTC_UF | RTC_IRQF;

	rtc_update_irq(rtc, 1, events);

	/* A wildcarded alarm repeats: compute and program its next firing. */
	if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
		rtc_update_alarm(&rtc_alarm);

	spin_unlock(&sa1100_rtc_lock);

	return IRQ_HANDLED;
}

/*
 * Character-device open hook: claim both RTC interrupt lines (1 Hz tick
 * and alarm) and advertise the maximum user frequency.  On failure the
 * already-acquired IRQ is released before returning the error.
 */
static int sa1100_rtc_open(struct device *dev)
{
	int ret;
	struct platform_device *plat_dev = to_platform_device(dev);
	struct rtc_device *rtc = platform_get_drvdata(plat_dev);

	ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
		"rtc 1Hz", dev);
	if (ret) {
		dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
		goto fail_ui;
	}
	ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED,
		"rtc Alrm", dev);
	if (ret) {
		dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
		goto fail_ai;
	}
	rtc->max_user_freq = RTC_FREQ;
	rtc_irq_set_freq(rtc, NULL, RTC_FREQ);

	return 0;

 fail_ai:
	free_irq(IRQ_RTC1Hz, dev);
 fail_ui:
	return ret;
}

/*
 * Character-device release hook: disable all RTC interrupt sources and
 * the OS timer match 1 channel, then give back both IRQ lines.
 */
static void sa1100_rtc_release(struct device *dev)
{
	spin_lock_irq(&sa1100_rtc_lock);
	RTSR = 0;
	OIER &= ~OIER_E1;
	OSSR = OSSR_M1;
	spin_unlock_irq(&sa1100_rtc_lock);

	free_irq(IRQ_RTCAlrm, dev);
	free_irq(IRQ_RTC1Hz, dev);
}

/* Enable or disable the alarm interrupt (RTSR_ALE bit). Always succeeds. */
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	spin_lock_irq(&sa1100_rtc_lock);
	if (enabled)
		RTSR |= RTSR_ALE;
	else
		RTSR &= ~RTSR_ALE;
	spin_unlock_irq(&sa1100_rtc_lock);
	return 0;
}

/* Read the RTC counter (seconds) and convert it to broken-down time. */
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	rtc_time_to_tm(RCNR, tm);
	return 0;
}

/* Convert @tm to seconds and load the RTC counter register. */
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long time;
	int ret;

	ret = rtc_tm_to_time(tm, &time);
	if (ret == 0)
		RCNR = time;
	return ret;
}

/*
 * Report the cached alarm time plus the live enabled/pending status
 * taken from a single RTSR snapshot.
 */
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	u32 rtsr;

	memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
	rtsr = RTSR;
	alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
	alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
	return 0;
}

/*
 * Program the hardware for a new alarm and set/clear the alarm-enable
 * bit according to @alrm->enabled.  Returns the rtc_update_alarm()
 * error, if any.
 */
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	int ret;

	spin_lock_irq(&sa1100_rtc_lock);
	ret = rtc_update_alarm(&alrm->time);
	if (ret == 0) {
		if (alrm->enabled)
			RTSR |= RTSR_ALE;
		else
			RTSR &= ~RTSR_ALE;
	}
	spin_unlock_irq(&sa1100_rtc_lock);

	return ret;
}

/* /proc RTC hook: dump the trim/divider (RTTR) and status (RTSR) registers. */
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
	seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
	seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);

	return 0;
}

static const struct rtc_class_ops sa1100_rtc_ops = {
	.open = sa1100_rtc_open,
	.release = sa1100_rtc_release,
	.read_time = sa1100_rtc_read_time,
	.set_time = sa1100_rtc_set_time,
	.read_alarm = sa1100_rtc_read_alarm,
	.set_alarm = sa1100_rtc_set_alarm,
	.proc = sa1100_rtc_proc,
	.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
};

/*
 * Platform probe: initialize the clock divider if it was never set up,
 * register with the RTC class and work around the spurious-pending-bit
 * hardware quirk described below.
 */
static int sa1100_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	/*
	 * According to the manual we should be able to let RTTR be zero
	 * and then a default diviser for a 32.768KHz clock is used.
	 * Apparently this doesn't work, at least for my SA1110 rev 5.
	 * If the clock divider is uninitialized then reset it to the
	 * default value to get the 1Hz clock.
	 */
	if (RTTR == 0) {
		RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
		dev_warn(&pdev->dev, "warning: "
			"initializing default clock divider/trim value\n");
		/* The current RTC value probably doesn't make sense either */
		RCNR = 0;
	}

	device_init_wakeup(&pdev->dev, 1);

	rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
		THIS_MODULE);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	platform_set_drvdata(pdev, rtc);

	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
	 * See also the comments in sa1100_rtc_interrupt().
	 *
	 * Sometimes bit 1 of the RTSR (RTSR_HZ) will wake up 1, which means an
	 * interrupt pending, even though interrupts were never enabled.
	 * In this case, this bit must be reset before enabling
	 * interruptions to avoid a nonexistent interrupt to occur.
	 *
	 * In principle, the same problem would apply to bit 0, although it has
	 * never been observed to happen.
	 *
	 * This issue is addressed both here and in sa1100_rtc_interrupt().
	 * If the issue is not addressed here, in the times when the processor
	 * wakes up with the bit set there will be one spurious interrupt.
	 *
	 * The issue is also dealt with in sa1100_rtc_interrupt() to be on the
	 * safe side, once the condition that lead to this strange
	 * initialization is unknown and could in principle happen during
	 * normal processing.
	 *
	 * Notice that clearing bit 1 and 0 is accomplished by writing ONES to
	 * the corresponding bits in RTSR. */
	RTSR = RTSR_AL | RTSR_HZ;

	return 0;
}

/* Platform remove: unregister from the RTC class if probe succeeded. */
static int sa1100_rtc_remove(struct platform_device *pdev)
{
	struct rtc_device *rtc = platform_get_drvdata(pdev);

	if (rtc)
		rtc_device_unregister(rtc);

	return 0;
}

#ifdef CONFIG_PM
/* Let the alarm IRQ wake the system if this device is a wakeup source. */
static int sa1100_rtc_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		enable_irq_wake(IRQ_RTCAlrm);
	return 0;
}

/* Undo the wake enable from suspend. */
static int sa1100_rtc_resume(struct device *dev)
{
	if (device_may_wakeup(dev))
		disable_irq_wake(IRQ_RTCAlrm);
	return 0;
}

static const struct dev_pm_ops sa1100_rtc_pm_ops = {
	.suspend	= sa1100_rtc_suspend,
	.resume		= sa1100_rtc_resume,
};
#endif

static struct platform_driver sa1100_rtc_driver = {
	.probe		= sa1100_rtc_probe,
	.remove		= sa1100_rtc_remove,
	.driver		= {
		.name	= "sa1100-rtc",
#ifdef CONFIG_PM
		.pm	= &sa1100_rtc_pm_ops,
#endif
	},
};

static int __init sa1100_rtc_init(void)
{
	return platform_driver_register(&sa1100_rtc_driver);
}

static void __exit sa1100_rtc_exit(void)
{
	platform_driver_unregister(&sa1100_rtc_driver);
}

module_init(sa1100_rtc_init);
module_exit(sa1100_rtc_exit);

MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sa1100-rtc");
gpl-2.0
sncn-private/imx6-linux
fs/9p/vfs_addr.c
3095
8412
/*
 *  linux/fs/9p/vfs_addr.c
 *
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_fid_readpage - read an entire page in from 9P
 *
 * @fid: fid being read
 * @page: structure to page
 *
 * Tries fscache first; on a cache miss it reads PAGE_CACHE_SIZE bytes
 * from the server at the page's file offset, zero-fills any short tail,
 * marks the page uptodate and pushes it back into fscache.  The page is
 * always unlocked before returning.
 */
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
	int retval;
	loff_t offset;
	char *buffer;
	struct inode *inode;

	inode = page->mapping->host;
	P9_DPRINTK(P9_DEBUG_VFS, "\n");

	BUG_ON(!PageLocked(page));

	/* A return of 0 means fscache satisfied the read (page unlocked
	 * by the cache layer). */
	retval = v9fs_readpage_from_fscache(inode, page);
	if (retval == 0)
		return retval;

	buffer = kmap(page);
	offset = page_offset(page);

	retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
	if (retval < 0) {
		v9fs_uncache_page(inode, page);
		goto done;
	}

	/* Zero the part of the page past what the server returned. */
	memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
	flush_dcache_page(page);
	SetPageUptodate(page);

	v9fs_readpage_to_fscache(inode, page);
	retval = 0;

done:
	kunmap(page);
	unlock_page(page);
	return retval;
}

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 *
 * @filp: file being read
 * @page: structure to page
 *
 * Thin ->readpage wrapper: the fid was stashed in filp->private_data
 * at open time.
 */
static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
	return v9fs_fid_readpage(filp->private_data, page);
}

/**
 * v9fs_vfs_readpages - read a set of pages from 9P
 *
 * @filp: file being read
 * @mapping: the address space
 * @pages: list of pages to read
 * @nr_pages: count of pages to read
 *
 * Lets fscache handle whatever pages it can; any remainder is read
 * page-by-page via read_cache_pages()/v9fs_vfs_readpage().
 */
static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	int ret = 0;
	struct inode *inode;

	inode = mapping->host;
	P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);

	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
	if (ret == 0)
		return ret;

	ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
	P9_DPRINTK(P9_DEBUG_VFS, " = %d\n", ret);
	return ret;
}

/**
 * v9fs_release_page - release the private state associated with a page
 *
 * Returns 1 if the page can be released, false otherwise.
 * Pages with PG_private set are never releasable here; otherwise the
 * decision is delegated to fscache.
 */

static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;
	return v9fs_fscache_release_page(page, gfp);
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 *
 * @page: structure to page
 * @offset: offset in the page
 *
 * Only a full-page invalidation (offset == 0) drops the fscache state;
 * partial invalidations leave it in place.
 */

static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
	/*
	 * If called with zero offset, we should release
	 * the private state assocated with the page
	 */
	if (offset == 0)
		v9fs_fscache_invalidate_page(page);
}

/*
 * Write one locked page back to the server through the inode's
 * writeback fid.  Computes the valid length for the final partial page,
 * temporarily widens the address limit (the write path takes a __user
 * pointer, but the source here is a kernel mapping), and signals
 * writeback start/end on the page.  Positive short-write counts are
 * normalized to 0; negative values are returned as errors.
 */
static int v9fs_vfs_writepage_locked(struct page *page)
{
	char *buffer;
	int retval, len;
	loff_t offset, size;
	mm_segment_t old_fs;
	struct v9fs_inode *v9inode;
	struct inode *inode = page->mapping->host;

	v9inode = V9FS_I(inode);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	set_page_writeback(page);

	buffer = kmap(page);
	offset = page_offset(page);

	old_fs = get_fs();
	set_fs(get_ds());
	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	retval = v9fs_file_write_internal(inode,
					  v9inode->writeback_fid,
					  (__force const char __user *)buffer,
					  len, &offset, 0);
	if (retval > 0)
		retval = 0;

	set_fs(old_fs);
	kunmap(page);
	end_page_writeback(page);
	return retval;
}

/*
 * ->writepage: write the page, redirtying it on -EAGAIN so the VM
 * retries later, or flagging a hard error on the page and mapping.
 * The page is unlocked in all cases.
 */
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = v9fs_vfs_writepage_locked(page);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			redirty_page_for_writepage(wbc, page);
			retval = 0;
		} else {
			SetPageError(page);
			mapping_set_error(page->mapping, retval);
		}
	} else
		retval = 0;

	unlock_page(page);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 *
 * Waits for any fscache write to finish, then synchronously writes the
 * page back if it is dirty.  Returns 0 on success.
 */

static int v9fs_launder_page(struct page *page)
{
	int retval;
	struct inode *inode = page->mapping->host;

	v9fs_fscache_wait_on_page_write(inode, page);
	if (clear_page_dirty_for_io(page)) {
		retval = v9fs_vfs_writepage_locked(page);
		if (retval)
			return retval;
	}
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allowes open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 *
 */
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t pos, unsigned long nr_segs)
{
	/*
	 * FIXME
	 * Now that we do caching with cache mode enabled, We need
	 * to support direct IO
	 */
	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
		   "off/no(%lld/%lu) EINVAL\n",
		   iocb->ki_filp->f_path.dentry->d_name.name,
		   (long long) pos, nr_segs);

	return -EINVAL;
}

/*
 * ->write_begin: grab (or create) the target page.  If the write does
 * not cover the whole page and the page is not yet uptodate, it must be
 * read in first via the writeback fid; after the read the page
 * reference is dropped and the lookup restarts from "start" so a fresh
 * locked, uptodate page is handed back to the caller.
 */
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int retval = 0;
	struct page *page;
	struct v9fs_inode *v9inode;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = mapping->host;

	v9inode = V9FS_I(inode);
start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		retval = -ENOMEM;
		goto out;
	}
	BUG_ON(!v9inode->writeback_fid);
	if (PageUptodate(page))
		goto out;

	/* A write that spans the whole page needs no read-modify-write. */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
	page_cache_release(page);
	if (!retval)
		goto start;
out:
	*pagep = page;
	return retval;
}

/*
 * ->write_end: zero any tail the caller failed to copy, mark the page
 * uptodate and dirty, extend i_size if the write grew the file, then
 * unlock and release the page.  Returns the number of bytes copied.
 */
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct inode *inode = page->mapping->host;

	if (unlikely(copied < len)) {
		/*
		 * zero out the rest of the area
		 */
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
		flush_dcache_page(page);
	}

	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}


const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readpages = v9fs_vfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};
gpl-2.0
DirtyUnicorns/android_kernel_samsung_espresso10
drivers/media/rc/keymaps/rc-pinnacle-color.c
3095
2017
/* pinnacle-color.h - Keytable for pinnacle_color Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* Scancode -> Linux input keycode table for the Pinnacle "color" remote. */
static struct rc_map_table pinnacle_color[] = {
	{ 0x59, KEY_MUTE },
	{ 0x4a, KEY_POWER },
	{ 0x18, KEY_TEXT },
	{ 0x26, KEY_TV },
	{ 0x3d, KEY_PRINT },

	{ 0x48, KEY_RED },
	{ 0x04, KEY_GREEN },
	{ 0x11, KEY_YELLOW },
	{ 0x00, KEY_BLUE },

	{ 0x2d, KEY_VOLUMEUP },
	{ 0x1e, KEY_VOLUMEDOWN },

	{ 0x49, KEY_MENU },

	{ 0x16, KEY_CHANNELUP },
	{ 0x17, KEY_CHANNELDOWN },

	{ 0x20, KEY_UP },
	{ 0x21, KEY_DOWN },
	{ 0x22, KEY_LEFT },
	{ 0x23, KEY_RIGHT },
	{ 0x0d, KEY_SELECT },

	{ 0x08, KEY_BACK },
	{ 0x07, KEY_REFRESH },

	{ 0x2f, KEY_ZOOM },
	{ 0x29, KEY_RECORD },

	{ 0x4b, KEY_PAUSE },
	{ 0x4d, KEY_REWIND },
	{ 0x2e, KEY_PLAY },
	{ 0x4e, KEY_FORWARD },
	{ 0x53, KEY_PREVIOUS },
	{ 0x4c, KEY_STOP },
	{ 0x54, KEY_NEXT },

	{ 0x69, KEY_0 },
	{ 0x6a, KEY_1 },
	{ 0x6b, KEY_2 },
	{ 0x6c, KEY_3 },
	{ 0x6d, KEY_4 },
	{ 0x6e, KEY_5 },
	{ 0x6f, KEY_6 },
	{ 0x70, KEY_7 },
	{ 0x71, KEY_8 },
	{ 0x72, KEY_9 },

	{ 0x74, KEY_CHANNEL },
	{ 0x0a, KEY_BACKSPACE },
};

/* rc-core map wrapper; RC_TYPE_UNKNOWN marks a legacy (raw) IR protocol. */
static struct rc_map_list pinnacle_color_map = {
	.map = {
		.scan    = pinnacle_color,
		.size    = ARRAY_SIZE(pinnacle_color),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PINNACLE_COLOR,
	}
};

/* Register the keymap with rc-core on module load. */
static int __init init_rc_map_pinnacle_color(void)
{
	return rc_map_register(&pinnacle_color_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_pinnacle_color(void)
{
	rc_map_unregister(&pinnacle_color_map);
}

module_init(init_rc_map_pinnacle_color)
module_exit(exit_rc_map_pinnacle_color)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
TEAM-Gummy/elite_kernel_jf
net/netfilter/xt_LOG.c
4631
22728
/* * This is a module which is used for logging packets. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/icmp.h> #include <net/udp.h> #include <net/tcp.h> #include <net/route.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_LOG.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/nf_log.h> #include <net/netfilter/xt_log.h> static struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = 5, .logflags = NF_LOG_MASK, }, }, }; static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset) { struct udphdr _udph; const struct udphdr *uh; if (proto == IPPROTO_UDP) /* Max length: 10 "PROTO=UDP " */ sb_add(m, "PROTO=UDP "); else /* Max length: 14 "PROTO=UDPLITE " */ sb_add(m, "PROTO=UDPLITE "); if (fragment) goto out; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); if (uh == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len)); out: return 0; } static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset, unsigned int logflags) { struct tcphdr _tcph; const struct tcphdr *th; /* Max length: 10 "PROTO=TCP " */ sb_add(m, "PROTO=TCP "); if (fragment) return 0; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ th = 
skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); if (th == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest)); /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ if (logflags & XT_LOG_TCPSEQ) sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq)); /* Max length: 13 "WINDOW=65535 " */ sb_add(m, "WINDOW=%u ", ntohs(th->window)); /* Max length: 9 "RES=0x3C " */ sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22)); /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */ if (th->cwr) sb_add(m, "CWR "); if (th->ece) sb_add(m, "ECE "); if (th->urg) sb_add(m, "URG "); if (th->ack) sb_add(m, "ACK "); if (th->psh) sb_add(m, "PSH "); if (th->rst) sb_add(m, "RST "); if (th->syn) sb_add(m, "SYN "); if (th->fin) sb_add(m, "FIN "); /* Max length: 11 "URGP=65535 " */ sb_add(m, "URGP=%u ", ntohs(th->urg_ptr)); if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) { u_int8_t _opt[60 - sizeof(struct tcphdr)]; const u_int8_t *op; unsigned int i; unsigned int optsize = th->doff*4 - sizeof(struct tcphdr); op = skb_header_pointer(skb, offset + sizeof(struct tcphdr), optsize, _opt); if (op == NULL) { sb_add(m, "OPT (TRUNCATED)"); return 1; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ sb_add(m, "OPT ("); for (i = 0; i < optsize; i++) sb_add(m, "%02X", op[i]); sb_add(m, ") "); } return 0; } /* One level of recursion won't kill us */ static void dump_ipv4_packet(struct sbuff *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int iphoff) { struct iphdr _iph; const struct iphdr *ih; unsigned int logflags; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_MASK; ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph); if (ih == NULL) { sb_add(m, "TRUNCATED"); return; } /* Important fields: * TOS, len, DF/MF, fragment offset, TTL, 
src, dst, options. */ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ sb_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr); /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK, ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id)); /* Max length: 6 "CE DF MF " */ if (ntohs(ih->frag_off) & IP_CE) sb_add(m, "CE "); if (ntohs(ih->frag_off) & IP_DF) sb_add(m, "DF "); if (ntohs(ih->frag_off) & IP_MF) sb_add(m, "MF "); /* Max length: 11 "FRAG:65535 " */ if (ntohs(ih->frag_off) & IP_OFFSET) sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET); if ((logflags & XT_LOG_IPOPT) && ih->ihl * 4 > sizeof(struct iphdr)) { const unsigned char *op; unsigned char _opt[4 * 15 - sizeof(struct iphdr)]; unsigned int i, optsize; optsize = ih->ihl * 4 - sizeof(struct iphdr); op = skb_header_pointer(skb, iphoff+sizeof(_iph), optsize, _opt); if (op == NULL) { sb_add(m, "TRUNCATED"); return; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ sb_add(m, "OPT ("); for (i = 0; i < optsize; i++) sb_add(m, "%02X", op[i]); sb_add(m, ") "); } switch (ih->protocol) { case IPPROTO_TCP: if (dump_tcp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff+ih->ihl*4, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (dump_udp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff+ih->ihl*4)) return; break; case IPPROTO_ICMP: { struct icmphdr _icmph; const struct icmphdr *ich; static const size_t required_len[NR_ICMP_TYPES+1] = { [ICMP_ECHOREPLY] = 4, [ICMP_DEST_UNREACH] = 8 + sizeof(struct iphdr), [ICMP_SOURCE_QUENCH] = 8 + sizeof(struct iphdr), [ICMP_REDIRECT] = 8 + sizeof(struct iphdr), [ICMP_ECHO] = 4, [ICMP_TIME_EXCEEDED] = 8 + sizeof(struct iphdr), [ICMP_PARAMETERPROB] = 8 + sizeof(struct iphdr), [ICMP_TIMESTAMP] = 20, [ICMP_TIMESTAMPREPLY] = 20, [ICMP_ADDRESS] = 12, [ICMP_ADDRESSREPLY] = 12 }; /* Max length: 11 
"PROTO=ICMP " */ sb_add(m, "PROTO=ICMP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ich = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_icmph), &_icmph); if (ich == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl*4); break; } /* Max length: 18 "TYPE=255 CODE=255 " */ sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ if (ich->type <= NR_ICMP_TYPES && required_len[ich->type] && skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl*4); break; } switch (ich->type) { case ICMP_ECHOREPLY: case ICMP_ECHO: /* Max length: 19 "ID=65535 SEQ=65535 " */ sb_add(m, "ID=%u SEQ=%u ", ntohs(ich->un.echo.id), ntohs(ich->un.echo.sequence)); break; case ICMP_PARAMETERPROB: /* Max length: 14 "PARAMETER=255 " */ sb_add(m, "PARAMETER=%u ", ntohl(ich->un.gateway) >> 24); break; case ICMP_REDIRECT: /* Max length: 24 "GATEWAY=255.255.255.255 " */ sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway); /* Fall through */ case ICMP_DEST_UNREACH: case ICMP_SOURCE_QUENCH: case ICMP_TIME_EXCEEDED: /* Max length: 3+maxlen */ if (!iphoff) { /* Only recurse once. 
*/ sb_add(m, "["); dump_ipv4_packet(m, info, skb, iphoff + ih->ihl*4+sizeof(_icmph)); sb_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ich->type == ICMP_DEST_UNREACH && ich->code == ICMP_FRAG_NEEDED) sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu)); } break; } /* Max Length */ case IPPROTO_AH: { struct ip_auth_hdr _ahdr; const struct ip_auth_hdr *ah; if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 9 "PROTO=AH " */ sb_add(m, "PROTO=AH "); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ah = skb_header_pointer(skb, iphoff+ih->ihl*4, sizeof(_ahdr), &_ahdr); if (ah == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl*4); break; } /* Length: 15 "SPI=0xF1234567 " */ sb_add(m, "SPI=0x%x ", ntohl(ah->spi)); break; } case IPPROTO_ESP: { struct ip_esp_hdr _esph; const struct ip_esp_hdr *eh; /* Max length: 10 "PROTO=ESP " */ sb_add(m, "PROTO=ESP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ eh = skb_header_pointer(skb, iphoff+ih->ihl*4, sizeof(_esph), &_esph); if (eh == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl*4); break; } /* Length: 15 "SPI=0xF1234567 " */ sb_add(m, "SPI=0x%x ", ntohl(eh->spi)); break; } /* Max length: 10 "PROTO 255 " */ default: sb_add(m, "PROTO=%u ", ih->protocol); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) { read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) sb_add(m, "UID=%u GID=%u ", skb->sk->sk_socket->file->f_cred->fsuid, skb->sk->sk_socket->file->f_cred->fsgid); read_unlock_bh(&skb->sk->sk_callback_lock); } /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) sb_add(m, "MARK=0x%x ", skb->mark); /* Proto Max log string length */ /* IP: 40+46+6+11+127 = 230 */ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ /* UDP: 10+max(25,20) = 35 */ /* UDPLITE: 14+max(25,20) = 39 */ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 
91+n */ /* ESP: 10+max(25)+15 = 50 */ /* AH: 9+max(25)+15 = 49 */ /* unknown: 10 */ /* (ICMP allows recursion one level deep) */ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */ /* maxlen = 230+ 91 + 230 + 252 = 803 */ } static void dump_ipv4_mac_header(struct sbuff *m, const struct nf_loginfo *info, const struct sk_buff *skb) { struct net_device *dev = skb->dev; unsigned int logflags = 0; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; if (!(logflags & XT_LOG_MACDECODE)) goto fallback; switch (dev->type) { case ARPHRD_ETHER: sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, ntohs(eth_hdr(skb)->h_proto)); return; default: break; } fallback: sb_add(m, "MAC="); if (dev->hard_header_len && skb->mac_header != skb->network_header) { const unsigned char *p = skb_mac_header(skb); unsigned int i; sb_add(m, "%02x", *p++); for (i = 1; i < dev->hard_header_len; i++, p++) sb_add(m, ":%02x", *p); } sb_add(m, " "); } static void log_packet_common(struct sbuff *m, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level, prefix, in ? in->name : "", out ? 
out->name : ""); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge) { const struct net_device *physindev; const struct net_device *physoutdev; physindev = skb->nf_bridge->physindev; if (physindev && in != physindev) sb_add(m, "PHYSIN=%s ", physindev->name); physoutdev = skb->nf_bridge->physoutdev; if (physoutdev && out != physoutdev) sb_add(m, "PHYSOUT=%s ", physoutdev->name); } #endif } static void ipt_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct sbuff *m = sb_open(); if (!loginfo) loginfo = &default_loginfo; log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix); if (in != NULL) dump_ipv4_mac_header(m, loginfo, skb); dump_ipv4_packet(m, loginfo, skb, 0); sb_close(m); } #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) /* One level of recursion won't kill us */ static void dump_ipv6_packet(struct sbuff *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int ip6hoff, int recurse) { u_int8_t currenthdr; int fragment; struct ipv6hdr _ip6h; const struct ipv6hdr *ih; unsigned int ptr; unsigned int hdrlen = 0; unsigned int logflags; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_MASK; ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h); if (ih == NULL) { sb_add(m, "TRUNCATED"); return; } /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */ sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr); /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", ntohs(ih->payload_len) + sizeof(struct ipv6hdr), (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20, ih->hop_limit, (ntohl(*(__be32 *)ih) & 0x000fffff)); fragment = 0; ptr = ip6hoff + sizeof(struct ipv6hdr); currenthdr = ih->nexthdr; while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) 
{ struct ipv6_opt_hdr _hdr; const struct ipv6_opt_hdr *hp; hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); if (hp == NULL) { sb_add(m, "TRUNCATED"); return; } /* Max length: 48 "OPT (...) " */ if (logflags & XT_LOG_IPOPT) sb_add(m, "OPT ( "); switch (currenthdr) { case IPPROTO_FRAGMENT: { struct frag_hdr _fhdr; const struct frag_hdr *fh; sb_add(m, "FRAG:"); fh = skb_header_pointer(skb, ptr, sizeof(_fhdr), &_fhdr); if (fh == NULL) { sb_add(m, "TRUNCATED "); return; } /* Max length: 6 "65535 " */ sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8); /* Max length: 11 "INCOMPLETE " */ if (fh->frag_off & htons(0x0001)) sb_add(m, "INCOMPLETE "); sb_add(m, "ID:%08x ", ntohl(fh->identification)); if (ntohs(fh->frag_off) & 0xFFF8) fragment = 1; hdrlen = 8; break; } case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: case IPPROTO_HOPOPTS: if (fragment) { if (logflags & XT_LOG_IPOPT) sb_add(m, ")"); return; } hdrlen = ipv6_optlen(hp); break; /* Max Length */ case IPPROTO_AH: if (logflags & XT_LOG_IPOPT) { struct ip_auth_hdr _ahdr; const struct ip_auth_hdr *ah; /* Max length: 3 "AH " */ sb_add(m, "AH "); if (fragment) { sb_add(m, ")"); return; } ah = skb_header_pointer(skb, ptr, sizeof(_ahdr), &_ahdr); if (ah == NULL) { /* * Max length: 26 "INCOMPLETE [65535 * bytes] )" */ sb_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 15 "SPI=0xF1234567 */ sb_add(m, "SPI=0x%x ", ntohl(ah->spi)); } hdrlen = (hp->hdrlen+2)<<2; break; case IPPROTO_ESP: if (logflags & XT_LOG_IPOPT) { struct ip_esp_hdr _esph; const struct ip_esp_hdr *eh; /* Max length: 4 "ESP " */ sb_add(m, "ESP "); if (fragment) { sb_add(m, ")"); return; } /* * Max length: 26 "INCOMPLETE [65535 bytes] )" */ eh = skb_header_pointer(skb, ptr, sizeof(_esph), &_esph); if (eh == NULL) { sb_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 16 "SPI=0xF1234567 )" */ sb_add(m, "SPI=0x%x )", ntohl(eh->spi)); } return; default: /* Max length: 20 "Unknown Ext Hdr 255" */ sb_add(m, "Unknown Ext Hdr 
%u", currenthdr); return; } if (logflags & XT_LOG_IPOPT) sb_add(m, ") "); currenthdr = hp->nexthdr; ptr += hdrlen; } switch (currenthdr) { case IPPROTO_TCP: if (dump_tcp_header(m, skb, currenthdr, fragment, ptr, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (dump_udp_header(m, skb, currenthdr, fragment, ptr)) return; break; case IPPROTO_ICMPV6: { struct icmp6hdr _icmp6h; const struct icmp6hdr *ic; /* Max length: 13 "PROTO=ICMPv6 " */ sb_add(m, "PROTO=ICMPv6 "); if (fragment) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h); if (ic == NULL) { sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr); return; } /* Max length: 18 "TYPE=255 CODE=255 " */ sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code); switch (ic->icmp6_type) { case ICMPV6_ECHO_REQUEST: case ICMPV6_ECHO_REPLY: /* Max length: 19 "ID=65535 SEQ=65535 " */ sb_add(m, "ID=%u SEQ=%u ", ntohs(ic->icmp6_identifier), ntohs(ic->icmp6_sequence)); break; case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: break; case ICMPV6_PARAMPROB: /* Max length: 17 "POINTER=ffffffff " */ sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer)); /* Fall through */ case ICMPV6_DEST_UNREACH: case ICMPV6_PKT_TOOBIG: case ICMPV6_TIME_EXCEED: /* Max length: 3+maxlen */ if (recurse) { sb_add(m, "["); dump_ipv6_packet(m, info, skb, ptr + sizeof(_icmp6h), 0); sb_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu)); } break; } /* Max length: 10 "PROTO=255 " */ default: sb_add(m, "PROTO=%u ", currenthdr); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & XT_LOG_UID) && recurse && skb->sk) { read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) sb_add(m, "UID=%u GID=%u ", skb->sk->sk_socket->file->f_cred->fsuid, skb->sk->sk_socket->file->f_cred->fsgid); read_unlock_bh(&skb->sk->sk_callback_lock); } 
/* Max length: 16 "MARK=0xFFFFFFFF " */ if (!recurse && skb->mark) sb_add(m, "MARK=0x%x ", skb->mark); } static void dump_ipv6_mac_header(struct sbuff *m, const struct nf_loginfo *info, const struct sk_buff *skb) { struct net_device *dev = skb->dev; unsigned int logflags = 0; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; if (!(logflags & XT_LOG_MACDECODE)) goto fallback; switch (dev->type) { case ARPHRD_ETHER: sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, ntohs(eth_hdr(skb)->h_proto)); return; default: break; } fallback: sb_add(m, "MAC="); if (dev->hard_header_len && skb->mac_header != skb->network_header) { const unsigned char *p = skb_mac_header(skb); unsigned int len = dev->hard_header_len; unsigned int i; if (dev->type == ARPHRD_SIT) { p -= ETH_HLEN; if (p < skb->head) p = NULL; } if (p != NULL) { sb_add(m, "%02x", *p++); for (i = 1; i < len; i++) sb_add(m, ":%02x", *p++); } sb_add(m, " "); if (dev->type == ARPHRD_SIT) { const struct iphdr *iph = (struct iphdr *)skb_mac_header(skb); sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr); } } else sb_add(m, " "); } static void ip6t_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct sbuff *m = sb_open(); if (!loginfo) loginfo = &default_loginfo; log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix); if (in != NULL) dump_ipv6_mac_header(m, loginfo, skb); dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1); sb_close(m); } #endif static unsigned int log_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_log_info *loginfo = par->targinfo; struct nf_loginfo li; li.type = NF_LOG_TYPE_LOG; li.u.log.level = loginfo->level; li.u.log.logflags = loginfo->logflags; if (par->family == NFPROTO_IPV4) ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, 
&li, loginfo->prefix); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) else if (par->family == NFPROTO_IPV6) ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out, &li, loginfo->prefix); #endif else WARN_ON_ONCE(1); return XT_CONTINUE; } static int log_tg_check(const struct xt_tgchk_param *par) { const struct xt_log_info *loginfo = par->targinfo; if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6) return -EINVAL; if (loginfo->level >= 8) { pr_debug("level %u >= 8\n", loginfo->level); return -EINVAL; } if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { pr_debug("prefix is not null-terminated\n"); return -EINVAL; } return 0; } static struct xt_target log_tg_regs[] __read_mostly = { { .name = "LOG", .family = NFPROTO_IPV4, .target = log_tg, .targetsize = sizeof(struct xt_log_info), .checkentry = log_tg_check, .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "LOG", .family = NFPROTO_IPV6, .target = log_tg, .targetsize = sizeof(struct xt_log_info), .checkentry = log_tg_check, .me = THIS_MODULE, }, #endif }; static struct nf_logger ipt_log_logger __read_mostly = { .name = "ipt_LOG", .logfn = &ipt_log_packet, .me = THIS_MODULE, }; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static struct nf_logger ip6t_log_logger __read_mostly = { .name = "ip6t_LOG", .logfn = &ip6t_log_packet, .me = THIS_MODULE, }; #endif static int __init log_tg_init(void) { int ret; ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs)); if (ret < 0) return ret; nf_log_register(NFPROTO_IPV4, &ipt_log_logger); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) nf_log_register(NFPROTO_IPV6, &ip6t_log_logger); #endif return 0; } static void __exit log_tg_exit(void) { nf_log_unregister(&ipt_log_logger); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) nf_log_unregister(&ip6t_log_logger); #endif xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs)); } module_init(log_tg_init); module_exit(log_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team 
<coreteam@netfilter.org>"); MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>"); MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging"); MODULE_ALIAS("ipt_LOG"); MODULE_ALIAS("ip6t_LOG");
gpl-2.0
sorinstanila/kernel_msm_kk
drivers/isdn/hysdn/hycapi.c
5143
22916
/* $Id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $ * * Linux driver for HYSDN cards, CAPI2.0-Interface. * * Author Ulrich Albrecht <u.albrecht@hypercope.de> for Hypercope GmbH * Copyright 2000 by Hypercope GmbH * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #define VER_DRIVER 0 #define VER_CARDTYPE 1 #define VER_HWID 2 #define VER_SERIAL 3 #define VER_OPTION 4 #define VER_PROTO 5 #define VER_PROFILE 6 #define VER_CAPI 7 #include "hysdn_defs.h" #include <linux/kernelcapi.h> static char hycapi_revision[] = "$Revision: 1.8.6.4 $"; unsigned int hycapi_enable = 0xffffffff; module_param(hycapi_enable, uint, 0); typedef struct _hycapi_appl { unsigned int ctrl_mask; capi_register_params rp; struct sk_buff *listen_req[CAPI_MAXCONTR]; } hycapi_appl; static hycapi_appl hycapi_applications[CAPI_MAXAPPL]; static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); static inline int _hycapi_appCheck(int app_id, int ctrl_no) { if ((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) || (app_id > CAPI_MAXAPPL)) { printk(KERN_ERR "HYCAPI: Invalid request app_id %d for controller %d", app_id, ctrl_no); return -1; } return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0); } /****************************** Kernel-Capi callback reset_ctr ******************************/ static void hycapi_reset_ctr(struct capi_ctr *ctrl) { hycapictrl_info *cinfo = ctrl->driverdata; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "HYCAPI hycapi_reset_ctr\n"); #endif capilib_release(&cinfo->ncci_head); capi_ctr_down(ctrl); } /****************************** Kernel-Capi callback remove_ctr ******************************/ static void 
hycapi_remove_ctr(struct capi_ctr *ctrl) { int i; hycapictrl_info *cinfo = NULL; hysdn_card *card = NULL; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "HYCAPI hycapi_remove_ctr\n"); #endif cinfo = (hycapictrl_info *)(ctrl->driverdata); if (!cinfo) { printk(KERN_ERR "No hycapictrl_info set!"); return; } card = cinfo->card; capi_ctr_suspend_output(ctrl); for (i = 0; i < CAPI_MAXAPPL; i++) { if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) { kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]); hycapi_applications[i].listen_req[ctrl->cnr - 1] = NULL; } } detach_capi_ctr(ctrl); ctrl->driverdata = NULL; kfree(card->hyctrlinfo); card->hyctrlinfo = NULL; } /*********************************************************** Queue a CAPI-message to the controller. ***********************************************************/ static void hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb) { hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); hysdn_card *card = cinfo->card; spin_lock_irq(&cinfo->lock); #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_send_message\n"); #endif cinfo->skbs[cinfo->in_idx++] = skb; /* add to buffer list */ if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB) cinfo->in_idx = 0; /* wrap around */ cinfo->sk_count++; /* adjust counter */ if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) { /* inform upper layers we're full */ printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n", card->myid); capi_ctr_suspend_output(ctrl); } cinfo->tx_skb = skb; spin_unlock_irq(&cinfo->lock); schedule_work(&card->irq_queue); } /*********************************************************** hycapi_register_internal Send down the CAPI_REGISTER-Command to the controller. This functions will also be used if the adapter has been rebooted to re-register any applications in the private list. 
************************************************************/ static void hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) { char ExtFeatureDefaults[] = "49 /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*"; hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); hysdn_card *card = cinfo->card; struct sk_buff *skb; __u16 len; __u8 _command = 0xa0, _subcommand = 0x80; __u16 MessageNumber = 0x0000; __u16 MessageBufferSize = 0; int slen = strlen(ExtFeatureDefaults); #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_register_appl\n"); #endif MessageBufferSize = rp->level3cnt * rp->datablkcnt * rp->datablklen; len = CAPI_MSG_BASELEN + 8 + slen + 1; if (!(skb = alloc_skb(len, GFP_ATOMIC))) { printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n", card->myid); return; } memcpy(skb_put(skb, sizeof(__u16)), &len, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &appl, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command)); memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand)); memcpy(skb_put(skb, sizeof(__u16)), &MessageNumber, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &MessageBufferSize, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &(rp->level3cnt), sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &(rp->datablkcnt), sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &(rp->datablklen), sizeof(__u16)); memcpy(skb_put(skb, slen), ExtFeatureDefaults, slen); hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1)); hycapi_send_message(ctrl, skb); } /************************************************************ hycapi_restart_internal After an adapter has been rebootet, re-register all applications and send a LISTEN_REQ (if there has been such a thing ) *************************************************************/ static void hycapi_restart_internal(struct capi_ctr *ctrl) { int i; struct sk_buff *skb; #ifdef HYCAPI_PRINTFNAMES 
printk(KERN_WARNING "HYSDN: hycapi_restart_internal"); #endif for (i = 0; i < CAPI_MAXAPPL; i++) { if (_hycapi_appCheck(i + 1, ctrl->cnr) == 1) { hycapi_register_internal(ctrl, i + 1, &hycapi_applications[i].rp); if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) { skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], GFP_ATOMIC); hycapi_sendmsg_internal(ctrl, skb); } } } } /************************************************************* Register an application. Error-checking is done for CAPI-compliance. The application is recorded in the internal list. *************************************************************/ static void hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) { int MaxLogicalConnections = 0, MaxBDataBlocks = 0, MaxBDataLen = 0; hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); hysdn_card *card = cinfo->card; int chk = _hycapi_appCheck(appl, ctrl->cnr); if (chk < 0) { return; } if (chk == 1) { printk(KERN_INFO "HYSDN: apl %d already registered\n", appl); return; } MaxBDataBlocks = rp->datablkcnt > CAPI_MAXDATAWINDOW ? CAPI_MAXDATAWINDOW : rp->datablkcnt; rp->datablkcnt = MaxBDataBlocks; MaxBDataLen = rp->datablklen < 1024 ? 1024 : rp->datablklen; rp->datablklen = MaxBDataLen; MaxLogicalConnections = rp->level3cnt; if (MaxLogicalConnections < 0) { MaxLogicalConnections = card->bchans * -MaxLogicalConnections; } if (MaxLogicalConnections == 0) { MaxLogicalConnections = card->bchans; } rp->level3cnt = MaxLogicalConnections; memcpy(&hycapi_applications[appl - 1].rp, rp, sizeof(capi_register_params)); } /********************************************************************* hycapi_release_internal Send down a CAPI_RELEASE to the controller. 
*********************************************************************/ static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl) { hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); hysdn_card *card = cinfo->card; struct sk_buff *skb; __u16 len; __u8 _command = 0xa1, _subcommand = 0x80; __u16 MessageNumber = 0x0000; capilib_release_appl(&cinfo->ncci_head, appl); #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_release_appl\n"); #endif len = CAPI_MSG_BASELEN; if (!(skb = alloc_skb(len, GFP_ATOMIC))) { printk(KERN_ERR "HYSDN card%d: memory squeeze in hycapi_register_appl\n", card->myid); return; } memcpy(skb_put(skb, sizeof(__u16)), &len, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u16)), &appl, sizeof(__u16)); memcpy(skb_put(skb, sizeof(__u8)), &_command, sizeof(_command)); memcpy(skb_put(skb, sizeof(__u8)), &_subcommand, sizeof(_subcommand)); memcpy(skb_put(skb, sizeof(__u16)), &MessageNumber, sizeof(__u16)); hycapi_send_message(ctrl, skb); hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1)); } /****************************************************************** hycapi_release_appl Release the application from the internal list an remove it's registration at controller-level ******************************************************************/ static void hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl) { int chk; chk = _hycapi_appCheck(appl, ctrl->cnr); if (chk < 0) { printk(KERN_ERR "HYCAPI: Releasing invalid appl %d on controller %d\n", appl, ctrl->cnr); return; } if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) { kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]); hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = NULL; } if (chk == 1) { hycapi_release_internal(ctrl, appl); } } /************************************************************** Kill a single controller. 
**************************************************************/ int hycapi_capi_release(hysdn_card *card) { hycapictrl_info *cinfo = card->hyctrlinfo; struct capi_ctr *ctrl; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_capi_release\n"); #endif if (cinfo) { ctrl = &cinfo->capi_ctrl; hycapi_remove_ctr(ctrl); } return 0; } /************************************************************** hycapi_capi_stop Stop CAPI-Output on a card. (e.g. during reboot) ***************************************************************/ int hycapi_capi_stop(hysdn_card *card) { hycapictrl_info *cinfo = card->hyctrlinfo; struct capi_ctr *ctrl; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_capi_stop\n"); #endif if (cinfo) { ctrl = &cinfo->capi_ctrl; /* ctrl->suspend_output(ctrl); */ capi_ctr_down(ctrl); } return 0; } /*************************************************************** hycapi_send_message Send a message to the controller. Messages are parsed for their Command/Subcommand-type, and appropriate action's are performed. Note that we have to muck around with a 64Bit-DATA_REQ as there are firmware-releases that do not check the MsgLen-Indication! 
***************************************************************/ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) { __u16 appl_id; int _len, _len2; __u8 msghead[64]; hycapictrl_info *cinfo = ctrl->driverdata; u16 retval = CAPI_NOERROR; appl_id = CAPIMSG_APPID(skb->data); switch (_hycapi_appCheck(appl_id, ctrl->cnr)) { case 0: /* printk(KERN_INFO "Need to register\n"); */ hycapi_register_internal(ctrl, appl_id, &(hycapi_applications[appl_id - 1].rp)); break; case 1: break; default: printk(KERN_ERR "HYCAPI: Controller mixup!\n"); retval = CAPI_ILLAPPNR; goto out; } switch (CAPIMSG_CMD(skb->data)) { case CAPI_DISCONNECT_B3_RESP: capilib_free_ncci(&cinfo->ncci_head, appl_id, CAPIMSG_NCCI(skb->data)); break; case CAPI_DATA_B3_REQ: _len = CAPIMSG_LEN(skb->data); if (_len > 22) { _len2 = _len - 22; skb_copy_from_linear_data(skb, msghead, 22); skb_copy_to_linear_data_offset(skb, _len2, msghead, 22); skb_pull(skb, _len2); CAPIMSG_SETLEN(skb->data, 22); retval = capilib_data_b3_req(&cinfo->ncci_head, CAPIMSG_APPID(skb->data), CAPIMSG_NCCI(skb->data), CAPIMSG_MSGID(skb->data)); } break; case CAPI_LISTEN_REQ: if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]) { kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]); hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL; } if (!(hycapi_applications[appl_id -1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC))) { printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n"); } break; default: break; } out: if (retval == CAPI_NOERROR) hycapi_sendmsg_internal(ctrl, skb); else dev_kfree_skb_any(skb); return retval; } static int hycapi_proc_show(struct seq_file *m, void *v) { struct capi_ctr *ctrl = m->private; hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); hysdn_card *card = cinfo->card; char *s; seq_printf(m, "%-16s %s\n", "name", cinfo->cardname); seq_printf(m, "%-16s 0x%x\n", "io", card->iobase); seq_printf(m, "%-16s %d\n", "irq", card->irq); 
switch (card->brdtype) { case BD_PCCARD: s = "HYSDN Hycard"; break; case BD_ERGO: s = "HYSDN Ergo2"; break; case BD_METRO: s = "HYSDN Metro4"; break; case BD_CHAMP2: s = "HYSDN Champ2"; break; case BD_PLEXUS: s = "HYSDN Plexus30"; break; default: s = "???"; break; } seq_printf(m, "%-16s %s\n", "type", s); if ((s = cinfo->version[VER_DRIVER]) != NULL) seq_printf(m, "%-16s %s\n", "ver_driver", s); if ((s = cinfo->version[VER_CARDTYPE]) != NULL) seq_printf(m, "%-16s %s\n", "ver_cardtype", s); if ((s = cinfo->version[VER_SERIAL]) != NULL) seq_printf(m, "%-16s %s\n", "ver_serial", s); seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname); return 0; } static int hycapi_proc_open(struct inode *inode, struct file *file) { return single_open(file, hycapi_proc_show, PDE(inode)->data); } static const struct file_operations hycapi_proc_fops = { .owner = THIS_MODULE, .open = hycapi_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /************************************************************** hycapi_load_firmware This does NOT load any firmware, but the callback somehow is needed on capi-interface registration. **************************************************************/ static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) { #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_load_firmware\n"); #endif return 0; } static char *hycapi_procinfo(struct capi_ctr *ctrl) { hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_proc_info\n"); #endif if (!cinfo) return ""; sprintf(cinfo->infobuf, "%s %s 0x%x %d %s", cinfo->cardname[0] ? cinfo->cardname : "-", cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-", cinfo->card ? cinfo->card->iobase : 0x0, cinfo->card ? cinfo->card->irq : 0, hycapi_revision ); return cinfo->infobuf; } /****************************************************************** hycapi_rx_capipkt Receive a capi-message. 
All B3_DATA_IND are converted to 64K-extension compatible format. New nccis are created if necessary. *******************************************************************/ void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len) { struct sk_buff *skb; hycapictrl_info *cinfo = card->hyctrlinfo; struct capi_ctr *ctrl; __u16 ApplId; __u16 MsgLen, info; __u16 len2, CapiCmd; __u32 CP64[2] = {0, 0}; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_rx_capipkt\n"); #endif if (!cinfo) { return; } ctrl = &cinfo->capi_ctrl; if (len < CAPI_MSG_BASELEN) { printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n", card->myid, len); return; } MsgLen = CAPIMSG_LEN(buf); ApplId = CAPIMSG_APPID(buf); CapiCmd = CAPIMSG_CMD(buf); if ((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) { len2 = len + (30 - MsgLen); if (!(skb = alloc_skb(len2, GFP_ATOMIC))) { printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n", card->myid); return; } memcpy(skb_put(skb, MsgLen), buf, MsgLen); memcpy(skb_put(skb, 2 * sizeof(__u32)), CP64, 2 * sizeof(__u32)); memcpy(skb_put(skb, len - MsgLen), buf + MsgLen, len - MsgLen); CAPIMSG_SETLEN(skb->data, 30); } else { if (!(skb = alloc_skb(len, GFP_ATOMIC))) { printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n", card->myid); return; } memcpy(skb_put(skb, len), buf, len); } switch (CAPIMSG_CMD(skb->data)) { case CAPI_CONNECT_B3_CONF: /* Check info-field for error-indication: */ info = CAPIMSG_U16(skb->data, 12); switch (info) { case 0: capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), hycapi_applications[ApplId - 1].rp.datablkcnt); break; case 0x0001: printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current " "protocol. 
NCPI ignored.\n", card->myid); break; case 0x2001: printk(KERN_ERR "HYSDN Card%d: Message not supported in" " current state\n", card->myid); break; case 0x2002: printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid); break; case 0x2004: printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid); break; case 0x3008: printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n", card->myid); break; default: printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n", card->myid, info); break; } break; case CAPI_CONNECT_B3_IND: capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), hycapi_applications[ApplId - 1].rp.datablkcnt); break; case CAPI_DATA_B3_CONF: capilib_data_b3_conf(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), CAPIMSG_MSGID(skb->data)); break; default: break; } capi_ctr_handle_message(ctrl, ApplId, skb); } /****************************************************************** hycapi_tx_capiack Internally acknowledge a msg sent. This will remove the msg from the internal queue. *******************************************************************/ void hycapi_tx_capiack(hysdn_card *card) { hycapictrl_info *cinfo = card->hyctrlinfo; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_tx_capiack\n"); #endif if (!cinfo) { return; } spin_lock_irq(&cinfo->lock); kfree_skb(cinfo->skbs[cinfo->out_idx]); /* free skb */ cinfo->skbs[cinfo->out_idx++] = NULL; if (cinfo->out_idx >= HYSDN_MAX_CAPI_SKB) cinfo->out_idx = 0; /* wrap around */ if (cinfo->sk_count-- == HYSDN_MAX_CAPI_SKB) /* dec usage count */ capi_ctr_resume_output(&cinfo->capi_ctrl); spin_unlock_irq(&cinfo->lock); } /*************************************************************** hycapi_tx_capiget(hysdn_card *card) This is called when polling for messages to SEND. 
****************************************************************/ struct sk_buff * hycapi_tx_capiget(hysdn_card *card) { hycapictrl_info *cinfo = card->hyctrlinfo; if (!cinfo) { return (struct sk_buff *)NULL; } if (!cinfo->sk_count) return (struct sk_buff *)NULL; /* nothing available */ return (cinfo->skbs[cinfo->out_idx]); /* next packet to send */ } /********************************************************** int hycapi_init() attach the capi-driver to the kernel-capi. ***********************************************************/ int hycapi_init(void) { int i; for (i = 0; i < CAPI_MAXAPPL; i++) { memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl)); } return (0); } /************************************************************** hycapi_cleanup(void) detach the capi-driver to the kernel-capi. Actually this should free some more ressources. Do that later. **************************************************************/ void hycapi_cleanup(void) { } /******************************************************************** hycapi_capi_create(hysdn_card *card) Attach the card with its capi-ctrl. *********************************************************************/ static void hycapi_fill_profile(hysdn_card *card) { hycapictrl_info *cinfo = NULL; struct capi_ctr *ctrl = NULL; cinfo = card->hyctrlinfo; if (!cinfo) return; ctrl = &cinfo->capi_ctrl; strcpy(ctrl->manu, "Hypercope"); ctrl->version.majorversion = 2; ctrl->version.minorversion = 0; ctrl->version.majormanuversion = 3; ctrl->version.minormanuversion = 2; ctrl->profile.ncontroller = card->myid; ctrl->profile.nbchannel = card->bchans; ctrl->profile.goptions = GLOBAL_OPTION_INTERNAL_CONTROLLER | GLOBAL_OPTION_B_CHANNEL_OPERATION; ctrl->profile.support1 = B1_PROT_64KBIT_HDLC | (card->faxchans ? B1_PROT_T30 : 0) | B1_PROT_64KBIT_TRANSPARENT; ctrl->profile.support2 = B2_PROT_ISO7776 | (card->faxchans ? 
B2_PROT_T30 : 0) | B2_PROT_TRANSPARENT; ctrl->profile.support3 = B3_PROT_TRANSPARENT | B3_PROT_T90NL | (card->faxchans ? B3_PROT_T30 : 0) | (card->faxchans ? B3_PROT_T30EXT : 0) | B3_PROT_ISO8208; } int hycapi_capi_create(hysdn_card *card) { hycapictrl_info *cinfo = NULL; struct capi_ctr *ctrl = NULL; int retval; #ifdef HYCAPI_PRINTFNAMES printk(KERN_NOTICE "hycapi_capi_create\n"); #endif if ((hycapi_enable & (1 << card->myid)) == 0) { return 1; } if (!card->hyctrlinfo) { cinfo = kzalloc(sizeof(hycapictrl_info), GFP_ATOMIC); if (!cinfo) { printk(KERN_WARNING "HYSDN: no memory for capi-ctrl.\n"); return -ENOMEM; } card->hyctrlinfo = cinfo; cinfo->card = card; spin_lock_init(&cinfo->lock); INIT_LIST_HEAD(&cinfo->ncci_head); switch (card->brdtype) { case BD_PCCARD: strcpy(cinfo->cardname, "HYSDN Hycard"); break; case BD_ERGO: strcpy(cinfo->cardname, "HYSDN Ergo2"); break; case BD_METRO: strcpy(cinfo->cardname, "HYSDN Metro4"); break; case BD_CHAMP2: strcpy(cinfo->cardname, "HYSDN Champ2"); break; case BD_PLEXUS: strcpy(cinfo->cardname, "HYSDN Plexus30"); break; default: strcpy(cinfo->cardname, "HYSDN ???"); break; } ctrl = &cinfo->capi_ctrl; ctrl->driver_name = "hycapi"; ctrl->driverdata = cinfo; ctrl->register_appl = hycapi_register_appl; ctrl->release_appl = hycapi_release_appl; ctrl->send_message = hycapi_send_message; ctrl->load_firmware = hycapi_load_firmware; ctrl->reset_ctr = hycapi_reset_ctr; ctrl->procinfo = hycapi_procinfo; ctrl->proc_fops = &hycapi_proc_fops; strcpy(ctrl->name, cinfo->cardname); ctrl->owner = THIS_MODULE; retval = attach_capi_ctr(ctrl); if (retval) { printk(KERN_ERR "hycapi: attach controller failed.\n"); return -EBUSY; } /* fill in the blanks: */ hycapi_fill_profile(card); capi_ctr_ready(ctrl); } else { /* resume output on stopped ctrl */ ctrl = &card->hyctrlinfo->capi_ctrl; hycapi_fill_profile(card); capi_ctr_ready(ctrl); hycapi_restart_internal(ctrl); /* ctrl->resume_output(ctrl); */ } return 0; }
gpl-2.0
jfdsmabalot/kernel_linux-3.4.y
kernel/time/timer_stats.c
6423
10145
/* * kernel/time/timer_stats.c * * Collect timer usage statistics. * * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * timer_stats is based on timer_top, a similar functionality which was part of * Con Kolivas dyntick patch set. It was developed by Daniel Petrini at the * Instituto Nokia de Tecnologia - INdT - Manaus. timer_top's design was based * on dynamic allocation of the statistics entries and linear search based * lookup combined with a global lock, rather than the static array, hash * and per-CPU locking which is used by timer_stats. It was written for the * pre hrtimer kernel code and therefore did not take hrtimers into account. * Nevertheless it provided the base for the timer_stats implementation and * was a helpful source of inspiration. Kudos to Daniel and the Nokia folks * for this effort. * * timer_top.c is * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus * Written by Daniel Petrini <d.pensator@gmail.com> * timer_top.c was released under the GNU General Public License version 2 * * We export the addresses and counting of timer functions being called, * the pid and cmdline from the owner process if applicable. * * Start/stop data collection: * # echo [1|0] >/proc/timer_stats * * Display the information collected so far: * # cat /proc/timer_stats * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <asm/uaccess.h> /* * This is our basic unit of interest: a timer expiry event identified * by the timer, its start/expire functions and the PID of the task that * started the timer. 
We count the number of times an event happens: */ struct entry { /* * Hash list: */ struct entry *next; /* * Hash keys: */ void *timer; void *start_func; void *expire_func; pid_t pid; /* * Number of timeout events: */ unsigned long count; unsigned int timer_flag; /* * We save the command-line string to preserve * this information past task exit: */ char comm[TASK_COMM_LEN + 1]; } ____cacheline_aligned_in_smp; /* * Spinlock protecting the tables - not taken during lookup: */ static DEFINE_RAW_SPINLOCK(table_lock); /* * Per-CPU lookup locks for fast hash lookup: */ static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); /* * Mutex to serialize state changes with show-stats activities: */ static DEFINE_MUTEX(show_mutex); /* * Collection status, active/inactive: */ int __read_mostly timer_stats_active; /* * Beginning/end timestamps of measurement: */ static ktime_t time_start, time_stop; /* * tstat entry structs only get allocated while collection is * active and never freed during that time - this simplifies * things quite a bit. * * They get freed when a new collection period is started. 
*/ #define MAX_ENTRIES_BITS 10 #define MAX_ENTRIES (1UL << MAX_ENTRIES_BITS) static unsigned long nr_entries; static struct entry entries[MAX_ENTRIES]; static atomic_t overflow_count; /* * The entries are in a hash-table, for fast lookup: */ #define TSTAT_HASH_BITS (MAX_ENTRIES_BITS - 1) #define TSTAT_HASH_SIZE (1UL << TSTAT_HASH_BITS) #define TSTAT_HASH_MASK (TSTAT_HASH_SIZE - 1) #define __tstat_hashfn(entry) \ (((unsigned long)(entry)->timer ^ \ (unsigned long)(entry)->start_func ^ \ (unsigned long)(entry)->expire_func ^ \ (unsigned long)(entry)->pid ) & TSTAT_HASH_MASK) #define tstat_hashentry(entry) (tstat_hash_table + __tstat_hashfn(entry)) static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly; static void reset_entries(void) { nr_entries = 0; memset(entries, 0, sizeof(entries)); memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); atomic_set(&overflow_count, 0); } static struct entry *alloc_entry(void) { if (nr_entries >= MAX_ENTRIES) return NULL; return entries + nr_entries++; } static int match_entries(struct entry *entry1, struct entry *entry2) { return entry1->timer == entry2->timer && entry1->start_func == entry2->start_func && entry1->expire_func == entry2->expire_func && entry1->pid == entry2->pid; } /* * Look up whether an entry matching this item is present * in the hash already. 
Must be called with irqs off and the * lookup lock held: */ static struct entry *tstat_lookup(struct entry *entry, char *comm) { struct entry **head, *curr, *prev; head = tstat_hashentry(entry); curr = *head; /* * The fastpath is when the entry is already hashed, * we do this with the lookup lock held, but with the * table lock not held: */ while (curr) { if (match_entries(curr, entry)) return curr; curr = curr->next; } /* * Slowpath: allocate, set up and link a new hash entry: */ prev = NULL; curr = *head; raw_spin_lock(&table_lock); /* * Make sure we have not raced with another CPU: */ while (curr) { if (match_entries(curr, entry)) goto out_unlock; prev = curr; curr = curr->next; } curr = alloc_entry(); if (curr) { *curr = *entry; curr->count = 0; curr->next = NULL; memcpy(curr->comm, comm, TASK_COMM_LEN); smp_mb(); /* Ensure that curr is initialized before insert */ if (prev) prev->next = curr; else *head = curr; } out_unlock: raw_spin_unlock(&table_lock); return curr; } /** * timer_stats_update_stats - Update the statistics for a timer. * @timer: pointer to either a timer_list or a hrtimer * @pid: the pid of the task which set up the timer * @startf: pointer to the function which did the timer setup * @timerf: pointer to the timer callback function of the timer * @comm: name of the process which set up the timer * * When the timer is already registered, then the event counter is * incremented. Otherwise the timer is registered in a free slot. 
*/ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, void *timerf, char *comm, unsigned int timer_flag) { /* * It doesn't matter which lock we take: */ raw_spinlock_t *lock; struct entry *entry, input; unsigned long flags; if (likely(!timer_stats_active)) return; lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); input.timer = timer; input.start_func = startf; input.expire_func = timerf; input.pid = pid; input.timer_flag = timer_flag; raw_spin_lock_irqsave(lock, flags); if (!timer_stats_active) goto out_unlock; entry = tstat_lookup(&input, comm); if (likely(entry)) entry->count++; else atomic_inc(&overflow_count); out_unlock: raw_spin_unlock_irqrestore(lock, flags); } static void print_name_offset(struct seq_file *m, unsigned long addr) { char symname[KSYM_NAME_LEN]; if (lookup_symbol_name(addr, symname) < 0) seq_printf(m, "<%p>", (void *)addr); else seq_printf(m, "%s", symname); } static int tstats_show(struct seq_file *m, void *v) { struct timespec period; struct entry *entry; unsigned long ms; long events = 0; ktime_t time; int i; mutex_lock(&show_mutex); /* * If still active then calculate up to now: */ if (timer_stats_active) time_stop = ktime_get(); time = ktime_sub(time_stop, time_start); period = ktime_to_timespec(time); ms = period.tv_nsec / 1000000; seq_puts(m, "Timer Stats Version: v0.2\n"); seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); if (atomic_read(&overflow_count)) seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count)); for (i = 0; i < nr_entries; i++) { entry = entries + i; if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) { seq_printf(m, "%4luD, %5d %-16s ", entry->count, entry->pid, entry->comm); } else { seq_printf(m, " %4lu, %5d %-16s ", entry->count, entry->pid, entry->comm); } print_name_offset(m, (unsigned long)entry->start_func); seq_puts(m, " ("); print_name_offset(m, (unsigned long)entry->expire_func); seq_puts(m, ")\n"); events += entry->count; } ms += period.tv_sec * 
1000; if (!ms) ms = 1; if (events && period.tv_sec) seq_printf(m, "%ld total events, %ld.%03ld events/sec\n", events, events * 1000 / ms, (events * 1000000 / ms) % 1000); else seq_printf(m, "%ld total events\n", events); mutex_unlock(&show_mutex); return 0; } /* * After a state change, make sure all concurrent lookup/update * activities have stopped: */ static void sync_access(void) { unsigned long flags; int cpu; for_each_online_cpu(cpu) { raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); raw_spin_lock_irqsave(lock, flags); /* nothing */ raw_spin_unlock_irqrestore(lock, flags); } } static ssize_t tstats_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { char ctl[2]; if (count != 2 || *offs) return -EINVAL; if (copy_from_user(ctl, buf, count)) return -EFAULT; mutex_lock(&show_mutex); switch (ctl[0]) { case '0': if (timer_stats_active) { timer_stats_active = 0; time_stop = ktime_get(); sync_access(); } break; case '1': if (!timer_stats_active) { reset_entries(); time_start = ktime_get(); smp_mb(); timer_stats_active = 1; } break; default: count = -EINVAL; } mutex_unlock(&show_mutex); return count; } static int tstats_open(struct inode *inode, struct file *filp) { return single_open(filp, tstats_show, NULL); } static const struct file_operations tstats_fops = { .open = tstats_open, .read = seq_read, .write = tstats_write, .llseek = seq_lseek, .release = single_release, }; void __init init_timer_stats(void) { int cpu; for_each_possible_cpu(cpu) raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); } static int __init init_tstats_procfs(void) { struct proc_dir_entry *pe; pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); if (!pe) return -ENOMEM; return 0; } __initcall(init_tstats_procfs);
gpl-2.0
Fechinator/FechdaKernel_V500
arch/mips/sgi-ip22/ip22-hpc.c
11799
1644
/*
 * ip22-hpc.c: Routines for generic manipulation of the HPC controllers.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Ralf Baechle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>

/* Mapped register blocks for the two possible HPC3 chips. */
struct hpc3_regs *hpc3c0, *hpc3c1;

EXPORT_SYMBOL(hpc3c0);
EXPORT_SYMBOL(hpc3c1);

/* Mapped IOC register block (Guiness/Indy only has a real one). */
struct sgioc_regs *sgioc;

EXPORT_SYMBOL(sgioc);

/* We need software copies of these because they are write only. */
u8 sgi_ioc_reset, sgi_ioc_write;

/* Machine-name string; defined elsewhere, filled in here. */
extern char *system_type;

/*
 * Map the HPC3/IOC register blocks, detect the machine flavour
 * (Indigo2 vs Indy) and program the initial reset/write register state.
 * NOTE(review): 'sgint' is assigned but not declared in this file —
 * presumably an extern from one of the asm/sgi headers; verify.
 */
void __init sgihpc_init(void)
{
	/* ioremap can't fail */
	hpc3c0 = (struct hpc3_regs *)
		 ioremap(HPC3_CHIP0_BASE, sizeof(struct hpc3_regs));
	hpc3c1 = (struct hpc3_regs *)
		 ioremap(HPC3_CHIP1_BASE, sizeof(struct hpc3_regs));
	/* IOC lives in PBUS PIO channel 6 */
	sgioc = (struct sgioc_regs *)hpc3c0->pbus_extregs[6];

	/* Switch PIO channel 6 to 16-bit data strobes. */
	hpc3c0->pbus_piocfg[6][0] |= HPC3_PIOCFG_DS16;
	if (ip22_is_fullhouse()) {
		/* Full House comes with INT2 which lives in PBUS PIO
		 * channel 4 */
		sgint = (struct sgint_regs *)hpc3c0->pbus_extregs[4];
		system_type = "SGI Indigo2";
	} else {
		/* Guiness comes with INT3 which is part of IOC */
		sgint = &sgioc->int3;
		system_type = "SGI Indy";
	}

	/* Initial reset-register state (registers are write-only, so
	 * keep the shadow copy in sync). */
	sgi_ioc_reset = (SGIOC_RESET_PPORT | SGIOC_RESET_KBDMOUSE |
			 SGIOC_RESET_EISA | SGIOC_RESET_ISDN |
			 SGIOC_RESET_LC0OFF);

	sgi_ioc_write = (SGIOC_WRITE_EASEL | SGIOC_WRITE_NTHRESH |
			 SGIOC_WRITE_TPSPEED | SGIOC_WRITE_EPSEL |
			 SGIOC_WRITE_U0AMODE | SGIOC_WRITE_U1AMODE);

	sgioc->reset = sgi_ioc_reset;
	sgioc->write = sgi_ioc_write;
}
gpl-2.0
CyanogenMod/lge-kernel-sniper
fs/nls/nls_cp1250.c
12567
15419
/* * linux/fs/nls/nls_cp1250.c * * Charset cp1250 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x20ac, 0x0000, 0x201a, 0x0000, 0x201e, 0x2026, 0x2020, 0x2021, 0x0000, 0x2030, 0x0160, 0x2039, 0x015a, 0x0164, 0x017d, 0x0179, /* 0x90*/ 0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x2122, 0x0161, 0x203a, 0x015b, 0x0165, 0x017e, 0x017a, /* 0xa0*/ 0x00a0, 0x02c7, 0x02d8, 0x0141, 0x00a4, 0x0104, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x015e, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x017b, /* 0xb0*/ 0x00b0, 0x00b1, 0x02db, 0x0142, 0x00b4, 0x00b5, 0x00b6, 
0x00b7, 0x00b8, 0x0105, 0x015f, 0x00bb, 0x013d, 0x02dd, 0x013e, 0x017c, /* 0xc0*/ 0x0154, 0x00c1, 0x00c2, 0x0102, 0x00c4, 0x0139, 0x0106, 0x00c7, 0x010c, 0x00c9, 0x0118, 0x00cb, 0x011a, 0x00cd, 0x00ce, 0x010e, /* 0xd0*/ 0x0110, 0x0143, 0x0147, 0x00d3, 0x00d4, 0x0150, 0x00d6, 0x00d7, 0x0158, 0x016e, 0x00da, 0x0170, 0x00dc, 0x00dd, 0x0162, 0x00df, /* 0xe0*/ 0x0155, 0x00e1, 0x00e2, 0x0103, 0x00e4, 0x013a, 0x0107, 0x00e7, 0x010d, 0x00e9, 0x0119, 0x00eb, 0x011b, 0x00ed, 0x00ee, 0x010f, /* 0xf0*/ 0x0111, 0x0144, 0x0148, 0x00f3, 0x00f4, 0x0151, 0x00f6, 0x00f7, 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, 
/* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xc5, 0xe5, 0x00, 0x00, 0xbc, 0xbe, 0x00, /* 0x38-0x3f */ 0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */ 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */ 0xd8, 0xf8, 0x8c, 0x9c, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */ 0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */ 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e, 0x00, /* 0x78-0x7f */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */ 0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 
0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x00, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xb9, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbe, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x00, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa5, 0xaa, 0xbb, 0xbc, 0xbd, 0xbc, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0x00, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int 
uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp1250", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp1250(void) { return register_nls(&table); } static void __exit exit_nls_cp1250(void) { unregister_nls(&table); } module_init(init_nls_cp1250) module_exit(exit_nls_cp1250) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
FrancescoCG/CrazySuperKernel-CM13-KLTE-NEW-REBASE
fs/crypto/policy.c
24
6574
/*
 * Encryption policy functions for per-file encryption support.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/fscrypto.h>

/* Return nonzero iff the inode already carries an encryption context. */
static int inode_has_encryption_context(struct inode *inode)
{
	/* Filesystem without get_context support: report "no context". */
	if (!inode->i_sb->s_cop->get_context)
		return 0;
	return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0);
}

/*
 * check whether the policy is consistent with the encryption context
 * for the inode
 */
static int is_encryption_context_consistent_with_policy(struct inode *inode,
				const struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->get_context)
		return 0;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	/* Anything but a full-size context is treated as inconsistent. */
	if (res != sizeof(ctx))
		return 0;

	/* Consistency requires key descriptor, flags and both modes match. */
	return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
			FS_KEY_DESCRIPTOR_SIZE) == 0 &&
			(ctx.flags == policy->flags) &&
			(ctx.contents_encryption_mode ==
			 policy->contents_encryption_mode) &&
			(ctx.filenames_encryption_mode ==
			 policy->filenames_encryption_mode));
}

/*
 * Validate POLICY, build a v1 context from it (with a fresh random
 * nonce) and store it on the inode via the filesystem's set_context hook.
 * Returns 0 or a negative errno.
 */
static int create_encryption_context_from_policy(struct inode *inode,
				const struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->set_context)
		return -EOPNOTSUPP;

	/* Optional hook: lets the filesystem prepare (e.g. reserve space). */
	if (inode->i_sb->s_cop->prepare_context) {
		res = inode->i_sb->s_cop->prepare_context(inode);
		if (res)
			return res;
	}

	ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
	memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
					FS_KEY_DESCRIPTOR_SIZE);

	if (!fscrypt_valid_contents_enc_mode(
				policy->contents_encryption_mode)) {
		printk(KERN_WARNING
		       "%s: Invalid contents encryption mode %d\n", __func__,
			policy->contents_encryption_mode);
		return -EINVAL;
	}

	if (!fscrypt_valid_filenames_enc_mode(
				policy->filenames_encryption_mode)) {
		printk(KERN_WARNING
			"%s: Invalid filenames encryption mode %d\n", __func__,
			policy->filenames_encryption_mode);
		return -EINVAL;
	}

	/* Reject policies carrying flag bits we do not understand. */
	if (policy->flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	ctx.contents_encryption_mode = policy->contents_encryption_mode;
	ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
	ctx.flags = policy->flags;
	BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
	get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
}

/*
 * Apply POLICY to INODE: create a context if the inode has none (the
 * directory must be empty), or verify the existing context matches.
 * Returns 0, or a negative errno (-EINVAL on mismatch/bad policy).
 */
int fscrypt_process_policy(struct inode *inode,
				const struct fscrypt_policy *policy)
{
	/* Only policy version 0 is understood. */
	if (policy->version != 0)
		return -EINVAL;

	if (!inode_has_encryption_context(inode)) {
		if (!inode->i_sb->s_cop->empty_dir)
			return -EOPNOTSUPP;
		/* A context may only be set on an empty directory. */
		if (!inode->i_sb->s_cop->empty_dir(inode))
			return -ENOTEMPTY;
		return create_encryption_context_from_policy(inode, policy);
	}

	if (is_encryption_context_consistent_with_policy(inode, policy))
		return 0;

	printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
	       __func__);
	return -EINVAL;
}
EXPORT_SYMBOL(fscrypt_process_policy);

/*
 * Read the inode's stored context back out as a version-0 policy.
 * Returns 0, -ENODATA if there is no usable context, or -EINVAL for
 * an unknown context format.
 */
int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
{
	struct fscrypt_context ctx;
	int res;

	if (!inode->i_sb->s_cop->get_context ||
			!inode->i_sb->s_cop->is_encrypted(inode))
		return -ENODATA;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res != sizeof(ctx))
		return -ENODATA;
	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	policy->version = 0;
	policy->contents_encryption_mode = ctx.contents_encryption_mode;
	policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
	policy->flags = ctx.flags;
	memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
				FS_KEY_DESCRIPTOR_SIZE);
	return 0;
}
EXPORT_SYMBOL(fscrypt_get_policy);

/*
 * Decide whether CHILD may live under PARENT: an unencrypted parent
 * permits anything; otherwise both must be encrypted with identical
 * key, modes and flags.  Returns 1 if permitted, 0 otherwise.
 */
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
	struct fscrypt_info *parent_ci, *child_ci;
	int res;

	if ((parent == NULL) || (child == NULL)) {
		printk(KERN_ERR	"parent %p child %p\n", parent, child);
		BUG_ON(1);
	}

	/* no restrictions if the parent directory is not encrypted */
	if (!parent->i_sb->s_cop->is_encrypted(parent))
		return 1;
	/* if the child directory is not encrypted, this is always a problem */
	if (!parent->i_sb->s_cop->is_encrypted(child))
		return 0;
	res = fscrypt_get_encryption_info(parent);
	if (res)
		return 0;
	res = fscrypt_get_encryption_info(child);
	if (res)
		return 0;
	parent_ci = parent->i_crypt_info;
	child_ci = child->i_crypt_info;
	/* Both keys unavailable: treated as matching. */
	if (!parent_ci && !child_ci)
		return 1;
	if (!parent_ci || !child_ci)
		return 0;

	return (memcmp(parent_ci->ci_master_key,
			child_ci->ci_master_key,
			FS_KEY_DESCRIPTOR_SIZE) == 0 &&
		(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
		(parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
		(parent_ci->ci_flags == child_ci->ci_flags));
}
EXPORT_SYMBOL(fscrypt_has_permitted_context);

/**
 * fscrypt_inherit_context() - Sets a child context from its parent
 * @parent: Parent inode from which the context is inherited.
 * @child:  Child inode that inherits the context from @parent.
 * @fs_data:  private data given by FS.
 * @preload:  preload child i_crypt_info
 *
 * Return: Zero on success, non-zero otherwise
 */
int fscrypt_inherit_context(struct inode *parent, struct inode *child,
						void *fs_data, bool preload)
{
	struct fscrypt_context ctx;
	struct fscrypt_info *ci;
	int res;

	if (!parent->i_sb->s_cop->set_context)
		return -EOPNOTSUPP;

	res = fscrypt_get_encryption_info(parent);
	if (res < 0)
		return res;

	ci = parent->i_crypt_info;
	if (ci == NULL)
		return -ENOKEY;

	ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
	if (fscrypt_dummy_context_enabled(parent)) {
		/* Test mode: fixed modes and an all-0x42 key descriptor. */
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
		res = 0;
	} else {
		ctx.contents_encryption_mode = ci->ci_data_mode;
		ctx.filenames_encryption_mode = ci->ci_filename_mode;
		ctx.flags = ci->ci_flags;
		memcpy(ctx.master_key_descriptor, ci->ci_master_key,
		       FS_KEY_DESCRIPTOR_SIZE);
	}
	/* Each file gets its own nonce, even with an inherited key. */
	get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
	res = parent->i_sb->s_cop->set_context(child, &ctx,
						sizeof(ctx), fs_data);
	if (res)
		return res;
	return preload ? fscrypt_get_encryption_info(child): 0;
}
EXPORT_SYMBOL(fscrypt_inherit_context);
gpl-2.0
Xilinx/binutils-gdb
gas/dwarf2dbg.c
24
51272
/* dwarf2dbg.c - DWARF2 debug support Copyright (C) 1999-2014 Free Software Foundation, Inc. Contributed by David Mosberger-Tang <davidm@hpl.hp.com> This file is part of GAS, the GNU Assembler. GAS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GAS; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Logical line numbers can be controlled by the compiler via the following directives: .file FILENO "file.c" .loc FILENO LINENO [COLUMN] [basic_block] [prologue_end] \ [epilogue_begin] [is_stmt VALUE] [isa VALUE] \ [discriminator VALUE] */ #include "as.h" #include "safe-ctype.h" #ifdef HAVE_LIMITS_H #include <limits.h> #else #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifndef INT_MAX #define INT_MAX (int) (((unsigned) (-1)) >> 1) #endif #endif #include "dwarf2dbg.h" #include <filenames.h> #ifdef HAVE_DOS_BASED_FILE_SYSTEM /* We need to decide which character to use as a directory separator. Just because HAVE_DOS_BASED_FILE_SYSTEM is defined, it does not necessarily mean that the backslash character is the one to use. Some environments, eg Cygwin, can support both naming conventions. So we use the heuristic that we only need to use the backslash if the path is an absolute path starting with a DOS style drive selector. 
eg C: or D: */ # define INSERT_DIR_SEPARATOR(string, offset) \ do \ { \ if (offset > 1 \ && string[0] != 0 \ && string[1] == ':') \ string [offset] = '\\'; \ else \ string [offset] = '/'; \ } \ while (0) #else # define INSERT_DIR_SEPARATOR(string, offset) string[offset] = '/' #endif #ifndef DWARF2_FORMAT # define DWARF2_FORMAT(SEC) dwarf2_format_32bit #endif #ifndef DWARF2_ADDR_SIZE # define DWARF2_ADDR_SIZE(bfd) (bfd_arch_bits_per_address (bfd) / 8) #endif #ifndef DWARF2_FILE_NAME #define DWARF2_FILE_NAME(FILENAME, DIRNAME) FILENAME #endif #ifndef DWARF2_FILE_TIME_NAME #define DWARF2_FILE_TIME_NAME(FILENAME,DIRNAME) 0 #endif #ifndef DWARF2_FILE_SIZE_NAME #define DWARF2_FILE_SIZE_NAME(FILENAME,DIRNAME) 0 #endif #ifndef DWARF2_VERSION #define DWARF2_VERSION 2 #endif /* The .debug_aranges version has been 2 in DWARF version 2, 3 and 4. */ #ifndef DWARF2_ARANGES_VERSION #define DWARF2_ARANGES_VERSION 2 #endif /* This implementation output version 2 .debug_line information. */ #ifndef DWARF2_LINE_VERSION #define DWARF2_LINE_VERSION 2 #endif #include "subsegs.h" #include "dwarf2.h" /* Since we can't generate the prolog until the body is complete, we use three different subsegments for .debug_line: one holding the prolog, one for the directory and filename info, and one for the body ("statement program"). */ #define DL_PROLOG 0 #define DL_FILES 1 #define DL_BODY 2 /* If linker relaxation might change offsets in the code, the DWARF special opcodes and variable-length operands cannot be used. If this macro is nonzero, use the DW_LNS_fixed_advance_pc opcode instead. */ #ifndef DWARF2_USE_FIXED_ADVANCE_PC # define DWARF2_USE_FIXED_ADVANCE_PC linkrelax #endif /* First special line opcde - leave room for the standard opcodes. Note: If you want to change this, you'll have to update the "standard_opcode_lengths" table that is emitted below in out_debug_line(). */ #define DWARF2_LINE_OPCODE_BASE 13 #ifndef DWARF2_LINE_BASE /* Minimum line offset in a special line info. opcode. 
This value was chosen to give a reasonable range of values.  */
# define DWARF2_LINE_BASE  -5
#endif

/* Range of line offsets in a special line info. opcode.  */
#ifndef DWARF2_LINE_RANGE
# define DWARF2_LINE_RANGE  14
#endif

#ifndef DWARF2_LINE_MIN_INSN_LENGTH
/* Define the architecture-dependent minimum instruction length (in
   bytes).  This value should be rather too small than too big.  */
# define DWARF2_LINE_MIN_INSN_LENGTH  1
#endif

/* Flag that indicates the initial value of the is_stmt_start flag.  */
#define	DWARF2_LINE_DEFAULT_IS_STMT	1

/* Given a special op, return the line skip amount.  */
#define SPECIAL_LINE(op) \
	(((op) - DWARF2_LINE_OPCODE_BASE)%DWARF2_LINE_RANGE + DWARF2_LINE_BASE)

/* Given a special op, return the address skip amount (in units of
   DWARF2_LINE_MIN_INSN_LENGTH.  */
#define SPECIAL_ADDR(op) (((op) - DWARF2_LINE_OPCODE_BASE)/DWARF2_LINE_RANGE)

/* The maximum address skip amount that can be encoded with a special op.  */
#define MAX_SPECIAL_ADDR_DELTA		SPECIAL_ADDR(255)

#ifndef TC_PARSE_CONS_RETURN_NONE
#define TC_PARSE_CONS_RETURN_NONE BFD_RELOC_NONE
#endif

/* One pending line-table entry: a label marking an address and the
   source location that applies there.  */
struct line_entry {
  struct line_entry *next;
  symbolS *label;
  struct dwarf2_line_info loc;
};

/* Singly-linked list of line entries for one subsection, kept in
   subsection order via the tail pointers.  */
struct line_subseg {
  struct line_subseg *next;
  subsegT subseg;
  struct line_entry *head;
  struct line_entry **ptail;
  struct line_entry **pmove_tail;
};

/* Per-section list of subsection line-entry lists, plus symbols
   delimiting the section's text range.  */
struct line_seg {
  struct line_seg *next;
  segT seg;
  struct line_subseg *head;
  symbolS *text_start;
  symbolS *text_end;
};

/* Collects data for all line table entries during assembly.  */
static struct line_seg *all_segs;
static struct line_seg **last_seg_ptr;

/* One .file directive: the file name and the index of its directory.  */
struct file_entry {
  const char *filename;
  unsigned int dir;
};

/* Table of files used by .debug_line.  */
static struct file_entry *files;
static unsigned int files_in_use;
static unsigned int files_allocated;

/* Table of directories used by .debug_line.  */
static char **dirs;
static unsigned int dirs_in_use;
static unsigned int dirs_allocated;

/* TRUE when we've seen a .loc directive recently.
Used to avoid doing work when there's nothing to do.  */
bfd_boolean dwarf2_loc_directive_seen;

/* TRUE when we're supposed to set the basic block mark whenever a
   label is seen.  */
bfd_boolean dwarf2_loc_mark_labels;

/* Current location as indicated by the most recent .loc directive.  */
static struct dwarf2_line_info current =
{
  1, 1, 0, 0,
  DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0,
  0
};

/* The size of an address on the target.  */
static unsigned int sizeof_address;

static unsigned int get_filenum (const char *, unsigned int);

#ifndef TC_DWARF2_EMIT_OFFSET
#define TC_DWARF2_EMIT_OFFSET  generic_dwarf2_emit_offset

/* Create an offset to .dwarf2_*.  Emits SIZE bytes referencing SYMBOL
   so the linker/assembler resolves the section-relative offset.  */

static void
generic_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_symbol;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif

/* Find or create (if CREATE_P) an entry for SEG+SUBSEG in ALL_SEGS.
   Returns NULL only when the entry is absent and CREATE_P is false.
   Subsegment entries are kept sorted by subseg number.  */

static struct line_subseg *
get_line_subseg (segT seg, subsegT subseg, bfd_boolean create_p)
{
  struct line_seg *s = seg_info (seg)->dwarf2_line_seg;
  struct line_subseg **pss, *lss;

  if (s == NULL)
    {
      if (!create_p)
	return NULL;
      /* First line info for this segment: link a new line_seg onto the
	 global ALL_SEGS tail and cache it in the segment info.  */
      s = (struct line_seg *) xmalloc (sizeof (*s));
      s->next = NULL;
      s->seg = seg;
      s->head = NULL;
      *last_seg_ptr = s;
      last_seg_ptr = &s->next;
      seg_info (seg)->dwarf2_line_seg = s;
    }
  gas_assert (seg == s->seg);

  /* Walk the sorted subseg list; stop at a match or at the insertion
     point for a new entry.  */
  for (pss = &s->head; (lss = *pss) != NULL ; pss = &lss->next)
    {
      if (lss->subseg == subseg)
	goto found_subseg;
      if (lss->subseg > subseg)
	break;
    }

  lss = (struct line_subseg *) xmalloc (sizeof (*lss));
  lss->next = *pss;
  lss->subseg = subseg;
  lss->head = NULL;
  lss->ptail = &lss->head;
  lss->pmove_tail = &lss->head;
  *pss = lss;

found_subseg:
  return lss;
}

/* Record an entry for LOC occurring at LABEL.
*/

static void
dwarf2_gen_line_info_1 (symbolS *label, struct dwarf2_line_info *loc)
{
  struct line_subseg *lss;
  struct line_entry *e;

  e = (struct line_entry *) xmalloc (sizeof (*e));
  e->next = NULL;
  e->label = label;
  e->loc = *loc;

  /* Append to the current subsegment's list via its tail pointer.  */
  lss = get_line_subseg (now_seg, now_subseg, TRUE);
  *lss->ptail = e;
  lss->ptail = &e->next;
}

/* Record an entry for LOC occurring at OFS within the current fragment.  */

void
dwarf2_gen_line_info (addressT ofs, struct dwarf2_line_info *loc)
{
  static unsigned int line = -1;
  static unsigned int filenum = -1;
  symbolS *sym;

  /* Early out for as-yet incomplete location information.  */
  if (loc->filenum == 0 || loc->line == 0)
    return;

  /* Don't emit sequences of line symbols for the same line when the
     symbols apply to assembler code.  It is necessary to emit
     duplicate line symbols when a compiler asks for them, because GDB
     uses them to determine the end of the prologue.  */
  if (debug_type == DEBUG_DWARF2
      && line == loc->line && filenum == loc->filenum)
    return;

  line = loc->line;
  filenum = loc->filenum;

  if (linkrelax)
    {
      char name[120];

      /* Use a non-fake name for the line number location,
	 so that it can be referred to by relocations.  */
      sprintf (name, ".Loc.%u.%u", line, filenum);
      sym = symbol_new (name, now_seg, ofs, frag_now);
    }
  else
    sym = symbol_temp_new (now_seg, ofs, frag_now);
  dwarf2_gen_line_info_1 (sym, loc);
}

/* Returns the current source information.  If .file directives have
   been encountered, the info for the corresponding source file is
   returned.  Otherwise, the info for the assembly source file is
   returned.
*/

void
dwarf2_where (struct dwarf2_line_info *line)
{
  if (debug_type == DEBUG_DWARF2)
    {
      char *filename;
      as_where (&filename, &line->line);
      line->filenum = get_filenum (filename, 0);
      line->column = 0;
      line->flags = DWARF2_FLAG_IS_STMT;
      line->isa = current.isa;
      line->discriminator = current.discriminator;
    }
  else
    *line = current;
}

/* A hook to allow the target backend to inform the line number state
   machine of isa changes when assembler debug info is enabled.  */

void
dwarf2_set_isa (unsigned int isa)
{
  current.isa = isa;
}

/* Called for each machine instruction, or relatively atomic group of
   machine instructions (ie built-in macro).  The instruction or group
   is SIZE bytes in length.  If dwarf2 line number generation is called
   for, emit a line statement appropriately.  */

void
dwarf2_emit_insn (int size)
{
  struct dwarf2_line_info loc;

  if (!dwarf2_loc_directive_seen && debug_type != DEBUG_DWARF2)
    return;

  dwarf2_where (&loc);

  /* The instruction was already emitted, so the line entry's address
     is the current fragment position minus the instruction size.  */
  dwarf2_gen_line_info (frag_now_fix () - size, &loc);
  dwarf2_consume_line_info ();
}

/* Move all previously-emitted line entries for the current position by
   DELTA bytes.  This function cannot be used to move the same entries
   twice.  */

void
dwarf2_move_insn (int delta)
{
  struct line_subseg *lss;
  struct line_entry *e;
  valueT now;

  if (delta == 0)
    return;

  lss = get_line_subseg (now_seg, now_subseg, FALSE);
  if (!lss)
    return;

  now = frag_now_fix ();
  /* pmove_tail marks how far previous calls have walked, so each entry
     is only ever moved once.  */
  while ((e = *lss->pmove_tail))
    {
      if (S_GET_VALUE (e->label) == now)
	S_SET_VALUE (e->label, now + delta);
      lss->pmove_tail = &e->next;
    }
}

/* Called after the current line information has been either used with
   dwarf2_gen_line_info or saved with a machine instruction for later use.
   This resets the state of the line number information to reflect that
   it has been used.  */

void
dwarf2_consume_line_info (void)
{
  /* Unless we generate DWARF2 debugging information for each
     assembler line, we only emit one line symbol for one LOC.
*/
  dwarf2_loc_directive_seen = FALSE;

  /* These flags apply to the single following instruction/label only;
     clear them so they are not carried to the next entry.  */
  current.flags &= ~(DWARF2_FLAG_BASIC_BLOCK
		     | DWARF2_FLAG_PROLOGUE_END
		     | DWARF2_FLAG_EPILOGUE_BEGIN);
  current.discriminator = 0;
}

/* Called for each (preferably code) label.  If dwarf2_loc_mark_labels
   is enabled, emit a basic block marker.  */

void
dwarf2_emit_label (symbolS *label)
{
  struct dwarf2_line_info loc;

  if (!dwarf2_loc_mark_labels)
    return;
  if (S_GET_SEGMENT (label) != now_seg)
    return;
  if (!(bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE))
    return;
  if (files_in_use == 0 && debug_type != DEBUG_DWARF2)
    return;

  dwarf2_where (&loc);

  loc.flags |= DWARF2_FLAG_BASIC_BLOCK;

  dwarf2_gen_line_info_1 (label, &loc);
  dwarf2_consume_line_info ();
}

/* Get a .debug_line file number for FILENAME.  If NUM is nonzero,
   allocate it on that file table slot, otherwise return the first
   empty one.  */

static unsigned int
get_filenum (const char *filename, unsigned int num)
{
  static unsigned int last_used, last_used_dir_len;
  const char *file;
  size_t dir_len;
  unsigned int i, dir;

  /* Fast path: the same file tends to be looked up repeatedly, so
     first check against the most recently returned slot.  */
  if (num == 0 && last_used)
    {
      if (! files[last_used].dir
	  && filename_cmp (filename, files[last_used].filename) == 0)
	return last_used;
      if (files[last_used].dir
	  && filename_ncmp (filename, dirs[files[last_used].dir],
			    last_used_dir_len) == 0
	  && IS_DIR_SEPARATOR (filename [last_used_dir_len])
	  && filename_cmp (filename + last_used_dir_len + 1,
			   files[last_used].filename) == 0)
	return last_used;
    }

  file = lbasename (filename);
  /* Don't make empty string from / or A: from A:/ .
*/
#ifdef HAVE_DOS_BASED_FILE_SYSTEM
  if (file <= filename + 3)
    file = filename;
#else
  if (file == filename + 1)
    file = filename;
#endif
  dir_len = file - filename;

  /* Look up (or add) the directory component; dir 0 means "no
     directory".  */
  dir = 0;
  if (dir_len)
    {
#ifndef DWARF2_DIR_SHOULD_END_WITH_SEPARATOR
      --dir_len;
#endif
      for (dir = 1; dir < dirs_in_use; ++dir)
	if (filename_ncmp (filename, dirs[dir], dir_len) == 0
	    && dirs[dir][dir_len] == '\0')
	  break;

      if (dir >= dirs_in_use)
	{
	  if (dir >= dirs_allocated)
	    {
	      dirs_allocated = dir + 32;
	      dirs = (char **)
		     xrealloc (dirs, (dir + 32) * sizeof (const char *));
	    }

	  dirs[dir] = (char *) xmalloc (dir_len + 1);
	  memcpy (dirs[dir], filename, dir_len);
	  dirs[dir][dir_len] = '\0';
	  dirs_in_use = dir + 1;
	}
    }

  /* NUM == 0: search for an existing entry; otherwise the caller (a
     .file directive) dictates the slot.  */
  if (num == 0)
    {
      for (i = 1; i < files_in_use; ++i)
	if (files[i].dir == dir
	    && files[i].filename
	    && filename_cmp (file, files[i].filename) == 0)
	  {
	    last_used = i;
	    last_used_dir_len = dir_len;
	    return i;
	  }
    }
  else
    i = num;

  if (i >= files_allocated)
    {
      unsigned int old = files_allocated;

      files_allocated = i + 32;
      files = (struct file_entry *)
	xrealloc (files, (i + 32) * sizeof (struct file_entry));

      /* Zero the new slots so unassigned file numbers are detectable.  */
      memset (files + old, 0, (i + 32 - old) * sizeof (struct file_entry));
    }

  /* For .file-assigned slots the caller's string is used directly;
     otherwise take a private copy of the basename.  */
  files[i].filename = num ? file : xstrdup (file);
  files[i].dir = dir;
  if (files_in_use < i + 1)
    files_in_use = i + 1;
  last_used = i;
  last_used_dir_len = dir_len;

  return i;
}

/* Handle two forms of .file directive:
   - Pass .file "source.c" to s_app_file
   - Handle .file 1 "source.c" by adding an entry to the DWARF-2 file table

   If an entry is added to the file table, return a pointer to the filename.  */

char *
dwarf2_directive_file (int dummy ATTRIBUTE_UNUSED)
{
  offsetT num;
  char *filename;
  int filename_len;

  /* Continue to accept a bare string and pass it off.
*/
  SKIP_WHITESPACE ();
  if (*input_line_pointer == '"')
    {
      s_app_file (0);
      return NULL;
    }

  num = get_absolute_expression ();
  filename = demand_copy_C_string (&filename_len);
  if (filename == NULL)
    return NULL;
  demand_empty_rest_of_line ();

  if (num < 1)
    {
      as_bad (_("file number less than one"));
      return NULL;
    }

  /* A .file directive implies compiler generated debug information is
     being supplied.  Turn off gas generated debug info.  */
  debug_type = DEBUG_NONE;

  if (num < (int) files_in_use && files[num].filename != 0)
    {
      as_bad (_("file number %ld already allocated"), (long) num);
      return NULL;
    }

  get_filenum (filename, num);

  return filename;
}

/* Handle the .loc directive: record the file/line/column (and optional
   sub-directives such as is_stmt, isa, discriminator, basic_block,
   prologue_end, epilogue_begin) to be attached to the next emitted
   instruction.  */

void
dwarf2_directive_loc (int dummy ATTRIBUTE_UNUSED)
{
  offsetT filenum, line;

  /* If we see two .loc directives in a row, force the first one to be
     output now.  */
  if (dwarf2_loc_directive_seen)
    dwarf2_emit_insn (0);

  filenum = get_absolute_expression ();
  SKIP_WHITESPACE ();
  line = get_absolute_expression ();

  if (filenum < 1)
    {
      as_bad (_("file number less than one"));
      return;
    }
  if (filenum >= (int) files_in_use || files[filenum].filename == 0)
    {
      as_bad (_("unassigned file number %ld"), (long) filenum);
      return;
    }

  current.filenum = filenum;
  current.line = line;
  current.discriminator = 0;

#ifndef NO_LISTING
  if (listing)
    {
      if (files[filenum].dir)
	{
	  size_t dir_len = strlen (dirs[files[filenum].dir]);
	  size_t file_len = strlen (files[filenum].filename);
	  char *cp = (char *) alloca (dir_len + 1 + file_len + 1);

	  memcpy (cp, dirs[files[filenum].dir], dir_len);
	  INSERT_DIR_SEPARATOR (cp, dir_len);
	  memcpy (cp + dir_len + 1, files[filenum].filename, file_len);
	  cp[dir_len + file_len + 1] = '\0';
	  listing_source_file (cp);
	}
      else
	listing_source_file (files[filenum].filename);
      listing_source_line (line);
    }
#endif

  /* An optional bare column number may follow the line.  */
  SKIP_WHITESPACE ();
  if (ISDIGIT (*input_line_pointer))
    {
      current.column = get_absolute_expression ();
      SKIP_WHITESPACE ();
    }

  /* Parse any named sub-directives.  get_symbol_end temporarily
     NUL-terminates the name at *input_line_pointer; C holds the byte
     that must be restored afterwards.  */
  while (ISALPHA (*input_line_pointer))
    {
      char *p, c;
      offsetT value;

      p = input_line_pointer;
      c = get_symbol_end ();

      if (strcmp
(p, "basic_block") == 0)
	{
	  current.flags |= DWARF2_FLAG_BASIC_BLOCK;
	  *input_line_pointer = c;
	}
      else if (strcmp (p, "prologue_end") == 0)
	{
	  current.flags |= DWARF2_FLAG_PROLOGUE_END;
	  *input_line_pointer = c;
	}
      else if (strcmp (p, "epilogue_begin") == 0)
	{
	  current.flags |= DWARF2_FLAG_EPILOGUE_BEGIN;
	  *input_line_pointer = c;
	}
      else if (strcmp (p, "is_stmt") == 0)
	{
	  *input_line_pointer = c;
	  value = get_absolute_expression ();
	  if (value == 0)
	    current.flags &= ~DWARF2_FLAG_IS_STMT;
	  else if (value == 1)
	    current.flags |= DWARF2_FLAG_IS_STMT;
	  else
	    {
	      as_bad (_("is_stmt value not 0 or 1"));
	      return;
	    }
	}
      else if (strcmp (p, "isa") == 0)
	{
	  *input_line_pointer = c;
	  value = get_absolute_expression ();
	  if (value >= 0)
	    current.isa = value;
	  else
	    {
	      as_bad (_("isa number less than zero"));
	      return;
	    }
	}
      else if (strcmp (p, "discriminator") == 0)
	{
	  *input_line_pointer = c;
	  value = get_absolute_expression ();
	  if (value >= 0)
	    current.discriminator = value;
	  else
	    {
	      as_bad (_("discriminator less than zero"));
	      return;
	    }
	}
      else
	{
	  as_bad (_("unknown .loc sub-directive `%s'"), p);
	  *input_line_pointer = c;
	  return;
	}

      SKIP_WHITESPACE ();
    }

  demand_empty_rest_of_line ();
  dwarf2_loc_directive_seen = TRUE;
  debug_type = DEBUG_NONE;
}

/* Handle the .loc_mark_labels directive: argument must be 0 or 1.  */

void
dwarf2_directive_loc_mark_labels (int dummy ATTRIBUTE_UNUSED)
{
  offsetT value = get_absolute_expression ();

  if (value != 0 && value != 1)
    {
      as_bad (_("expected 0 or 1"));
      ignore_rest_of_line ();
    }
  else
    {
      dwarf2_loc_mark_labels = value != 0;
      demand_empty_rest_of_line ();
    }
}

/* Return the first fragment of segment SEG.  */

static struct frag *
first_frag_for_seg (segT seg)
{
  return seg_info (seg)->frchainP->frch_root;
}

/* Return the last fragment of segment SEG (last fragment of the last
   subsegment chain).  */

static struct frag *
last_frag_for_seg (segT seg)
{
  frchainS *f = seg_info (seg)->frchainP;

  while (f->frch_next != NULL)
    f = f->frch_next;

  return f->frch_last;
}

/* Emit a single byte into the current segment.  */

static inline void
out_byte (int byte)
{
  FRAG_APPEND_1_CHAR (byte);
}

/* Emit a statement program opcode into the current segment.
*/ static inline void out_opcode (int opc) { out_byte (opc); } /* Emit a two-byte word into the current segment. */ static inline void out_two (int data) { md_number_to_chars (frag_more (2), data, 2); } /* Emit a four byte word into the current segment. */ static inline void out_four (int data) { md_number_to_chars (frag_more (4), data, 4); } /* Emit an unsigned "little-endian base 128" number. */ static void out_uleb128 (addressT value) { output_leb128 (frag_more (sizeof_leb128 (value, 0)), value, 0); } /* Emit a signed "little-endian base 128" number. */ static void out_leb128 (addressT value) { output_leb128 (frag_more (sizeof_leb128 (value, 1)), value, 1); } /* Emit a tuple for .debug_abbrev. */ static inline void out_abbrev (int name, int form) { out_uleb128 (name); out_uleb128 (form); } /* Get the size of a fragment. */ static offsetT get_frag_fix (fragS *frag, segT seg) { frchainS *fr; if (frag->fr_next) return frag->fr_fix; /* If a fragment is the last in the chain, special measures must be taken to find its size before relaxation, since it may be pending on some subsegment chain. */ for (fr = seg_info (seg)->frchainP; fr; fr = fr->frch_next) if (fr->frch_last == frag) return (char *) obstack_next_free (&fr->frch_obstack) - frag->fr_literal; abort (); } /* Set an absolute address (may result in a relocation entry). 
*/

static void
out_set_addr (symbolS *sym)
{
  expressionS exp;

  /* DW_LNE_set_address: extended op whose operand is a full target
     address, hence length = address size + 1 opcode byte.  */
  out_opcode (DW_LNS_extended_op);
  out_uleb128 (sizeof_address + 1);

  out_opcode (DW_LNE_set_address);
  exp.X_op = O_symbol;
  exp.X_add_symbol = sym;
  exp.X_add_number = 0;
  emit_expr (&exp, sizeof_address);
}

static void scale_addr_delta (addressT *);

/* Divide *ADDR_DELTA by the minimum instruction length, diagnosing
   (once) any delta that is not a multiple of it.  */

static void
scale_addr_delta (addressT *addr_delta)
{
  static int printed_this = 0;
  if (DWARF2_LINE_MIN_INSN_LENGTH > 1)
    {
      if (*addr_delta % DWARF2_LINE_MIN_INSN_LENGTH != 0  && !printed_this)
	{
	  as_bad("unaligned opcodes detected in executable segment");
	  printed_this = 1;
	}
      *addr_delta /= DWARF2_LINE_MIN_INSN_LENGTH;
    }
}

/* Encode a pair of line and address skips as efficiently as possible.
   Note that the line skip is signed, whereas the address skip is unsigned.

   The following two routines *must* be kept in sync.  This is
   enforced by making emit_inc_line_addr abort if we do not emit
   exactly the expected number of bytes.  */

static int
size_inc_line_addr (int line_delta, addressT addr_delta)
{
  unsigned int tmp, opcode;
  int len = 0;

  /* Scale the address delta by the minimum instruction length.  */
  scale_addr_delta (&addr_delta);

  /* INT_MAX is a signal that this is actually a DW_LNE_end_sequence.
     We cannot use special opcodes here, since we want the end_sequence
     to emit the matrix entry.  */
  if (line_delta == INT_MAX)
    {
      if (addr_delta == MAX_SPECIAL_ADDR_DELTA)
	len = 1;
      else
	len = 1 + sizeof_leb128 (addr_delta, 0);
      return len + 3;
    }

  /* Bias the line delta by the base.  */
  tmp = line_delta - DWARF2_LINE_BASE;

  /* If the line increment is out of range of a special opcode, we
     must encode it with DW_LNS_advance_line.  */
  if (tmp >= DWARF2_LINE_RANGE)
    {
      len = 1 + sizeof_leb128 (line_delta, 1);
      line_delta = 0;
      tmp = 0 - DWARF2_LINE_BASE;
    }

  /* Bias the opcode by the special opcode base.  */
  tmp += DWARF2_LINE_OPCODE_BASE;

  /* Avoid overflow when addr_delta is large.  */
  if (addr_delta < 256 + MAX_SPECIAL_ADDR_DELTA)
    {
      /* Try using a special opcode.
*/
      opcode = tmp + addr_delta * DWARF2_LINE_RANGE;
      if (opcode <= 255)
	return len + 1;

      /* Try using DW_LNS_const_add_pc followed by special op.  */
      opcode = tmp + (addr_delta - MAX_SPECIAL_ADDR_DELTA) * DWARF2_LINE_RANGE;
      if (opcode <= 255)
	return len + 2;
    }

  /* Otherwise use DW_LNS_advance_pc.  */
  len += 1 + sizeof_leb128 (addr_delta, 0);

  /* DW_LNS_copy or special opcode.  */
  len += 1;

  return len;
}

/* Write the opcode sequence whose size size_inc_line_addr computed
   into the LEN bytes at P.  Must mirror that routine exactly.  */

static void
emit_inc_line_addr (int line_delta, addressT addr_delta, char *p, int len)
{
  unsigned int tmp, opcode;
  int need_copy = 0;
  char *end = p + len;

  /* Line number sequences cannot go backward in addresses.  This means
     we've incorrectly ordered the statements in the sequence.  */
  gas_assert ((offsetT) addr_delta >= 0);

  /* Scale the address delta by the minimum instruction length.  */
  scale_addr_delta (&addr_delta);

  /* INT_MAX is a signal that this is actually a DW_LNE_end_sequence.
     We cannot use special opcodes here, since we want the end_sequence
     to emit the matrix entry.  */
  if (line_delta == INT_MAX)
    {
      if (addr_delta == MAX_SPECIAL_ADDR_DELTA)
	*p++ = DW_LNS_const_add_pc;
      else
	{
	  *p++ = DW_LNS_advance_pc;
	  p += output_leb128 (p, addr_delta, 0);
	}

      *p++ = DW_LNS_extended_op;
      *p++ = 1;
      *p++ = DW_LNE_end_sequence;
      goto done;
    }

  /* Bias the line delta by the base.  */
  tmp = line_delta - DWARF2_LINE_BASE;

  /* If the line increment is out of range of a special opcode, we
     must encode it with DW_LNS_advance_line.  */
  if (tmp >= DWARF2_LINE_RANGE)
    {
      *p++ = DW_LNS_advance_line;
      p += output_leb128 (p, line_delta, 1);

      line_delta = 0;
      tmp = 0 - DWARF2_LINE_BASE;
      need_copy = 1;
    }

  /* Prettier, I think, to use DW_LNS_copy instead of a "line +0, addr +0"
     special opcode.  */
  if (line_delta == 0 && addr_delta == 0)
    {
      *p++ = DW_LNS_copy;
      goto done;
    }

  /* Bias the opcode by the special opcode base.  */
  tmp += DWARF2_LINE_OPCODE_BASE;

  /* Avoid overflow when addr_delta is large.  */
  if (addr_delta < 256 + MAX_SPECIAL_ADDR_DELTA)
    {
      /* Try using a special opcode.
*/
      opcode = tmp + addr_delta * DWARF2_LINE_RANGE;
      if (opcode <= 255)
	{
	  *p++ = opcode;
	  goto done;
	}

      /* Try using DW_LNS_const_add_pc followed by special op.  */
      opcode = tmp + (addr_delta - MAX_SPECIAL_ADDR_DELTA) * DWARF2_LINE_RANGE;
      if (opcode <= 255)
	{
	  *p++ = DW_LNS_const_add_pc;
	  *p++ = opcode;
	  goto done;
	}
    }

  /* Otherwise use DW_LNS_advance_pc.  */
  *p++ = DW_LNS_advance_pc;
  p += output_leb128 (p, addr_delta, 0);

  if (need_copy)
    *p++ = DW_LNS_copy;
  else
    *p++ = tmp;

 done:
  gas_assert (p == end);
}

/* Handy routine to combine calls to the above two routines.  */

static void
out_inc_line_addr (int line_delta, addressT addr_delta)
{
  int len = size_inc_line_addr (line_delta, addr_delta);
  emit_inc_line_addr (line_delta, addr_delta, frag_more (len), len);
}

/* Write out an alternative form of line and address skips using
   DW_LNS_fixed_advance_pc opcodes.  This uses more space than the default
   line and address information, but it is required if linker relaxation
   could change the code offsets.  The following two routines *must* be
   kept in sync.  */
#define ADDR_DELTA_LIMIT 50000

static int
size_fixed_inc_line_addr (int line_delta, addressT addr_delta)
{
  int len = 0;

  /* INT_MAX is a signal that this is actually a DW_LNE_end_sequence.  */
  if (line_delta != INT_MAX)
    len = 1 + sizeof_leb128 (line_delta, 1);

  if (addr_delta > ADDR_DELTA_LIMIT)
    {
      /* DW_LNS_extended_op */
      len += 1 + sizeof_leb128 (sizeof_address + 1, 0);
      /* DW_LNE_set_address */
      len += 1 + sizeof_address;
    }
  else
    /* DW_LNS_fixed_advance_pc */
    len += 3;

  if (line_delta == INT_MAX)
    /* DW_LNS_extended_op + DW_LNE_end_sequence */
    len += 3;
  else
    /* DW_LNS_copy */
    len += 1;

  return len;
}

/* Fixed-advance counterpart of emit_inc_line_addr; writes into the LEN
   bytes at P within FRAG, creating fixups so the linker can resolve the
   address delta after relaxation.  */

static void
emit_fixed_inc_line_addr (int line_delta, addressT addr_delta, fragS *frag,
			  char *p, int len)
{
  expressionS *pexp;
  char *end = p + len;

  /* Line number sequences cannot go backward in addresses.  This means
     we've incorrectly ordered the statements in the sequence.
*/
  gas_assert ((offsetT) addr_delta >= 0);

  /* Verify that we have kept in sync with size_fixed_inc_line_addr.  */
  gas_assert (len == size_fixed_inc_line_addr (line_delta, addr_delta));

  /* INT_MAX is a signal that this is actually a DW_LNE_end_sequence.  */
  if (line_delta != INT_MAX)
    {
      *p++ = DW_LNS_advance_line;
      p += output_leb128 (p, line_delta, 1);
    }

  pexp = symbol_get_value_expression (frag->fr_symbol);

  /* The DW_LNS_fixed_advance_pc opcode has a 2-byte operand so it can
     advance the address by at most 64K.  Linker relaxation (without
     which this function would not be used) could change the operand by
     an unknown amount.  If the address increment is getting close to
     the limit, just reset the address.  */
  if (addr_delta > ADDR_DELTA_LIMIT)
    {
      symbolS *to_sym;
      expressionS exp;

      gas_assert (pexp->X_op == O_subtract);
      to_sym = pexp->X_add_symbol;

      *p++ = DW_LNS_extended_op;
      p += output_leb128 (p, sizeof_address + 1, 0);
      *p++ = DW_LNE_set_address;
      exp.X_op = O_symbol;
      exp.X_add_symbol = to_sym;
      exp.X_add_number = 0;
      emit_expr_fix (&exp, sizeof_address, frag, p, TC_PARSE_CONS_RETURN_NONE);
      p += sizeof_address;
    }
  else
    {
      *p++ = DW_LNS_fixed_advance_pc;
      emit_expr_fix (pexp, 2, frag, p, TC_PARSE_CONS_RETURN_NONE);
      p += 2;
    }

  if (line_delta == INT_MAX)
    {
      *p++ = DW_LNS_extended_op;
      *p++ = 1;
      *p++ = DW_LNE_end_sequence;
    }
  else
    *p++ = DW_LNS_copy;

  gas_assert (p == end);
}

/* Generate a variant frag that we can use to relax address/line
   increments between fragments of the target segment.  */

static void
relax_inc_line_addr (int line_delta, symbolS *to_sym, symbolS *from_sym)
{
  expressionS exp;
  int max_chars;

  exp.X_op = O_subtract;
  exp.X_add_symbol = to_sym;
  exp.X_op_symbol = from_sym;
  exp.X_add_number = 0;

  /* The maximum size of the frag is the line delta with a maximum
     sized address delta.
*/
  if (DWARF2_USE_FIXED_ADVANCE_PC)
    max_chars = size_fixed_inc_line_addr (line_delta,
					  -DWARF2_LINE_MIN_INSN_LENGTH);
  else
    max_chars = size_inc_line_addr (line_delta,
				    -DWARF2_LINE_MIN_INSN_LENGTH);

  frag_var (rs_dwarf2dbg, max_chars, max_chars, 1,
	    make_expr_symbol (&exp), line_delta, NULL);
}

/* The function estimates the size of a rs_dwarf2dbg variant frag
   based on the current values of the symbols.  It is called before
   the relaxation loop.  We set fr_subtype to the expected length.  */

int
dwarf2dbg_estimate_size_before_relax (fragS *frag)
{
  offsetT addr_delta;
  int size;

  addr_delta = resolve_symbol_value (frag->fr_symbol);
  if (DWARF2_USE_FIXED_ADVANCE_PC)
    size = size_fixed_inc_line_addr (frag->fr_offset, addr_delta);
  else
    size = size_inc_line_addr (frag->fr_offset, addr_delta);

  frag->fr_subtype = size;

  return size;
}

/* This function relaxes a rs_dwarf2dbg variant frag based on the
   current values of the symbols.  fr_subtype is the current length
   of the frag.  This returns the change in frag length.  */

int
dwarf2dbg_relax_frag (fragS *frag)
{
  int old_size, new_size;

  old_size = frag->fr_subtype;
  new_size = dwarf2dbg_estimate_size_before_relax (frag);

  return new_size - old_size;
}

/* This function converts a rs_dwarf2dbg variant frag into a normal
   fill frag.  This is called after all relaxation has been done.
   fr_subtype will be the desired length of the frag.  */

void
dwarf2dbg_convert_frag (fragS *frag)
{
  offsetT addr_diff;

  if (DWARF2_USE_FIXED_ADVANCE_PC)
    {
      /* If linker relaxation is enabled then the distance between the two
	 symbols in the frag->fr_symbol expression might change.  Hence we
	 cannot rely upon the value computed by resolve_symbol_value.
	 Instead we leave the expression unfinalized and allow
	 emit_fixed_inc_line_addr to create a fixup (which later becomes a
	 relocation) that will allow the linker to correctly compute the
	 actual address difference.  We have to use a fixed line advance for
	 this as we cannot (easily) relocate leb128 encoded values.
*/
      int saved_finalize_syms = finalize_syms;

      finalize_syms = 0;
      addr_diff = resolve_symbol_value (frag->fr_symbol);
      finalize_syms = saved_finalize_syms;
    }
  else
    addr_diff = resolve_symbol_value (frag->fr_symbol);

  /* fr_var carries the max_chars that we created the fragment with.
     fr_subtype carries the current expected length.  We must, of
     course, have allocated enough memory earlier.  */
  gas_assert (frag->fr_var >= (int) frag->fr_subtype);

  if (DWARF2_USE_FIXED_ADVANCE_PC)
    emit_fixed_inc_line_addr (frag->fr_offset, addr_diff, frag,
			      frag->fr_literal + frag->fr_fix,
			      frag->fr_subtype);
  else
    emit_inc_line_addr (frag->fr_offset, addr_diff,
			frag->fr_literal + frag->fr_fix, frag->fr_subtype);

  frag->fr_fix += frag->fr_subtype;
  frag->fr_type = rs_fill;
  frag->fr_var = 0;
  frag->fr_offset = 0;
}

/* Generate .debug_line content for the chain of line number entries
   beginning at E, for segment SEG.  */

static void
process_entries (segT seg, struct line_entry *e)
{
  /* DWARF line-number state machine registers, at their standard
     initial values.  */
  unsigned filenum = 1;
  unsigned line = 1;
  unsigned column = 0;
  unsigned isa = 0;
  unsigned flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0;
  fragS *last_frag = NULL, *frag;
  addressT last_frag_ofs = 0, frag_ofs;
  symbolS *last_lab = NULL, *lab;
  struct line_entry *next;

  if (flag_dwarf_sections)
    {
      char * name;
      const char * sec_name;

      /* Switch to the relevant sub-section before we start to emit
	 the line number table.

	 FIXME: These sub-sections do not have a normal Line Number
	 Program Header, thus strictly speaking they are not valid
	 DWARF sections.  Unfortunately the DWARF standard assumes
	 a one-to-one relationship between compilation units and
	 line number tables.  Thus we have to have a .debug_line
	 section, as well as our sub-sections, and we have to ensure
	 that all of the sub-sections are merged into a proper
	 .debug_line section before a debugger sees them.
*/
      sec_name = bfd_get_section_name (stdoutput, seg);
      if (strcmp (sec_name, ".text") != 0)
	{
	  unsigned int len;

	  len = strlen (sec_name);
	  name = xmalloc (len + 11 + 2);
	  sprintf (name, ".debug_line%s", sec_name);
	  subseg_set (subseg_get (name, FALSE), 0);
	}
      else
	/* Don't create a .debug_line.text section -
	   that is redundant.  Instead just switch back to the
	   normal .debug_line section.  */
	subseg_set (subseg_get (".debug_line", FALSE), 0);
    }

  do
    {
      int line_delta;

      /* Emit standard opcodes for whichever state-machine registers
	 differ from this entry's location.  */
      if (filenum != e->loc.filenum)
	{
	  filenum = e->loc.filenum;
	  out_opcode (DW_LNS_set_file);
	  out_uleb128 (filenum);
	}

      if (column != e->loc.column)
	{
	  column = e->loc.column;
	  out_opcode (DW_LNS_set_column);
	  out_uleb128 (column);
	}

      if (e->loc.discriminator != 0)
	{
	  out_opcode (DW_LNS_extended_op);
	  out_leb128 (1 + sizeof_leb128 (e->loc.discriminator, 0));
	  out_opcode (DW_LNE_set_discriminator);
	  out_uleb128 (e->loc.discriminator);
	}

      if (isa != e->loc.isa)
	{
	  isa = e->loc.isa;
	  out_opcode (DW_LNS_set_isa);
	  out_uleb128 (isa);
	}

      if ((e->loc.flags ^ flags) & DWARF2_FLAG_IS_STMT)
	{
	  flags = e->loc.flags;
	  out_opcode (DW_LNS_negate_stmt);
	}

      if (e->loc.flags & DWARF2_FLAG_BASIC_BLOCK)
	out_opcode (DW_LNS_set_basic_block);

      if (e->loc.flags & DWARF2_FLAG_PROLOGUE_END)
	out_opcode (DW_LNS_set_prologue_end);

      if (e->loc.flags & DWARF2_FLAG_EPILOGUE_BEGIN)
	out_opcode (DW_LNS_set_epilogue_begin);

      /* Don't try to optimize away redundant entries; gdb wants two
	 entries for a function where the code starts on the same line as
	 the {, and there's no way to identify that case here.  Trust gcc
	 to optimize appropriately.  */
      line_delta = e->loc.line - line;
      lab = e->label;
      frag = symbol_get_frag (lab);
      frag_ofs = S_GET_VALUE (lab);

      if (last_frag == NULL)
	{
	  out_set_addr (lab);
	  out_inc_line_addr (line_delta, 0);
	}
      else if (frag == last_frag && ! DWARF2_USE_FIXED_ADVANCE_PC)
	out_inc_line_addr (line_delta, frag_ofs - last_frag_ofs);
      else
	/* Crossing a fragment boundary (or relaxing): defer the
	   address delta to a variant frag resolved after relaxation.  */
	relax_inc_line_addr (line_delta, lab, last_lab);

      line = e->loc.line;
      last_lab = lab;
      last_frag = frag;
      last_frag_ofs = frag_ofs;

      next = e->next;
      free (e);
      e = next;
    }
  while (e);

  /* Emit a DW_LNE_end_sequence for the end of the section.  */
  frag = last_frag_for_seg (seg);
  frag_ofs = get_frag_fix (frag, seg);
  if (frag == last_frag && ! DWARF2_USE_FIXED_ADVANCE_PC)
    out_inc_line_addr (INT_MAX, frag_ofs - last_frag_ofs);
  else
    {
      lab = symbol_temp_new (seg, frag_ofs, frag);
      relax_inc_line_addr (INT_MAX, lab, last_lab);
    }
}

/* Emit the directory and file tables for .debug_line.  */

static void
out_file_list (void)
{
  size_t size;
  const char *dir;
  char *cp;
  unsigned int i;

  /* Emit directory list.  */
  for (i = 1; i < dirs_in_use; ++i)
    {
      dir = remap_debug_filename (dirs[i]);
      size = strlen (dir) + 1;
      cp = frag_more (size);
      memcpy (cp, dir, size);
    }
  /* Terminate it.  */
  out_byte ('\0');

  for (i = 1; i < files_in_use; ++i)
    {
      const char *fullfilename;

      if (files[i].filename == NULL)
	{
	  as_bad (_("unassigned file number %ld"), (long) i);
	  /* Prevent a crash later, particularly for file 1.  */
	  files[i].filename = "";
	  continue;
	}

      fullfilename = DWARF2_FILE_NAME (files[i].filename,
				       files[i].dir ? dirs [files [i].dir] : "");
      size = strlen (fullfilename) + 1;
      cp = frag_more (size);
      memcpy (cp, fullfilename, size);

      out_uleb128 (files[i].dir);	/* directory number */
      /* Output the last modification timestamp.  */
      out_uleb128 (DWARF2_FILE_TIME_NAME (files[i].filename,
					  files[i].dir ? dirs [files [i].dir] : ""));
      /* Output the filesize.  */
      out_uleb128 (DWARF2_FILE_SIZE_NAME (files[i].filename,
					  files[i].dir ? dirs [files [i].dir] : ""));
    }

  /* Terminate filename list.  */
  out_byte (0);
}

/* Switch to SEC and output a header length field.  Return the size of
   offsets used in SEC.  The caller must set EXPR->X_add_symbol value
   to the end of the section.
*/

static int
out_header (asection *sec, expressionS *exp)
{
  symbolS *start_sym;
  symbolS *end_sym;

  subseg_set (sec, 0);
  start_sym = symbol_temp_new_now ();
  end_sym = symbol_temp_make ();

  /* Total length of the information.  */
  exp->X_op = O_subtract;
  exp->X_add_symbol = end_sym;
  exp->X_op_symbol = start_sym;

  switch (DWARF2_FORMAT (sec))
    {
    case dwarf2_format_32bit:
      exp->X_add_number = -4;
      emit_expr (exp, 4);
      return 4;

    case dwarf2_format_64bit:
      /* 64-bit DWARF: 0xffffffff escape followed by an 8-byte length.  */
      exp->X_add_number = -12;
      out_four (-1);
      emit_expr (exp, 8);
      return 8;

    case dwarf2_format_64bit_irix:
      exp->X_add_number = -8;
      emit_expr (exp, 8);
      return 8;
    }

  as_fatal (_("internal error: unknown dwarf2 format"));
  return 0;
}

/* Emit the collected .debug_line data.  */

static void
out_debug_line (segT line_seg)
{
  expressionS exp;
  symbolS *prologue_start, *prologue_end;
  symbolS *line_end;
  struct line_seg *s;
  int sizeof_offset;

  sizeof_offset = out_header (line_seg, &exp);
  line_end = exp.X_add_symbol;

  /* Version.  */
  out_two (DWARF2_LINE_VERSION);

  /* Length of the prologue following this length.  */
  prologue_start = symbol_temp_make ();
  prologue_end = symbol_temp_make ();
  exp.X_op = O_subtract;
  exp.X_add_symbol = prologue_end;
  exp.X_op_symbol = prologue_start;
  exp.X_add_number = 0;
  emit_expr (&exp, sizeof_offset);
  symbol_set_value_now (prologue_start);

  /* Parameters of the state machine.  */
  out_byte (DWARF2_LINE_MIN_INSN_LENGTH);
  out_byte (DWARF2_LINE_DEFAULT_IS_STMT);
  out_byte (DWARF2_LINE_BASE);
  out_byte (DWARF2_LINE_RANGE);
  out_byte (DWARF2_LINE_OPCODE_BASE);

  /* Standard opcode lengths: operand count for each standard opcode.
     Must agree with DWARF2_LINE_OPCODE_BASE above.
*/
  out_byte (0);			/* DW_LNS_copy */
  out_byte (1);			/* DW_LNS_advance_pc */
  out_byte (1);			/* DW_LNS_advance_line */
  out_byte (1);			/* DW_LNS_set_file */
  out_byte (1);			/* DW_LNS_set_column */
  out_byte (0);			/* DW_LNS_negate_stmt */
  out_byte (0);			/* DW_LNS_set_basic_block */
  out_byte (0);			/* DW_LNS_const_add_pc */
  out_byte (1);			/* DW_LNS_fixed_advance_pc */
  out_byte (0);			/* DW_LNS_set_prologue_end */
  out_byte (0);			/* DW_LNS_set_epilogue_begin */
  out_byte (1);			/* DW_LNS_set_isa */

  out_file_list ();

  symbol_set_value_now (prologue_end);

  /* For each section, emit a statement program.  */
  for (s = all_segs; s; s = s->next)
    if (SEG_NORMAL (s->seg))
      process_entries (s->seg, s->head->head);
    else
      as_warn ("dwarf line number information for %s ignored",
	       segment_name (s->seg));

  if (flag_dwarf_sections)
    /* We have to switch to the special .debug_line_end section
       before emitting the end-of-debug_line symbol.  The linker
       script arranges for this section to be placed after all the
       (potentially garbage collected) .debug_line.<foo> sections.
       This section contains the line_end symbol which is used to
       compute the size of the linked .debug_line section, as seen
       in the DWARF Line Number header.  */
    subseg_set (subseg_get (".debug_line_end", FALSE), 0);

  symbol_set_value_now (line_end);
}

/* Emit the .debug_ranges contents: one begin/end address pair per
   segment, preceded by a base-address-selection entry and followed by
   the end-of-list entry.  Also records each segment's text_start and
   text_end symbols.  */

static void
out_debug_ranges (segT ranges_seg)
{
  unsigned int addr_size = sizeof_address;
  struct line_seg *s;
  expressionS exp;
  unsigned int i;

  subseg_set (ranges_seg, 0);

  /* Base Address Entry: all-ones address marks a base address
     selection entry; the base itself is 0.  */
  for (i = 0; i < addr_size; i++)
    out_byte (0xff);
  for (i = 0; i < addr_size; i++)
    out_byte (0);

  /* Range List Entry.
*/
  for (s = all_segs; s; s = s->next)
    {
      fragS *frag;
      symbolS *beg, *end;

      frag = first_frag_for_seg (s->seg);
      beg = symbol_temp_new (s->seg, 0, frag);
      s->text_start = beg;

      frag = last_frag_for_seg (s->seg);
      end = symbol_temp_new (s->seg, get_frag_fix (frag, s->seg), frag);
      s->text_end = end;

      exp.X_op = O_symbol;
      exp.X_add_symbol = beg;
      exp.X_add_number = 0;
      emit_expr (&exp, addr_size);

      exp.X_op = O_symbol;
      exp.X_add_symbol = end;
      exp.X_add_number = 0;
      emit_expr (&exp, addr_size);
    }

  /* End of Range Entry.  */
  for (i = 0; i < addr_size; i++)
    out_byte (0);
  for (i = 0; i < addr_size; i++)
    out_byte (0);
}

/* Emit data for .debug_aranges.  */

static void
out_debug_aranges (segT aranges_seg, segT info_seg)
{
  unsigned int addr_size = sizeof_address;
  struct line_seg *s;
  expressionS exp;
  symbolS *aranges_end;
  char *p;
  int sizeof_offset;

  sizeof_offset = out_header (aranges_seg, &exp);
  aranges_end = exp.X_add_symbol;

  /* Version.  */
  out_two (DWARF2_ARANGES_VERSION);

  /* Offset to .debug_info.  */
  TC_DWARF2_EMIT_OFFSET (section_symbol (info_seg), sizeof_offset);

  /* Size of an address (offset portion).  */
  out_byte (addr_size);

  /* Size of a segment descriptor.  */
  out_byte (0);

  /* Align the header so the address/length pairs start on a
     2*addr_size boundary, as the aranges format requires.  */
  frag_align (ffs (2 * addr_size) - 1, 0, 0);

  /* One (start address, length) pair per segment.  */
  for (s = all_segs; s; s = s->next)
    {
      fragS *frag;
      symbolS *beg, *end;

      frag = first_frag_for_seg (s->seg);
      beg = symbol_temp_new (s->seg, 0, frag);
      s->text_start = beg;

      frag = last_frag_for_seg (s->seg);
      end = symbol_temp_new (s->seg, get_frag_fix (frag, s->seg), frag);
      s->text_end = end;

      exp.X_op = O_symbol;
      exp.X_add_symbol = beg;
      exp.X_add_number = 0;
      emit_expr (&exp, addr_size);

      exp.X_op = O_subtract;
      exp.X_add_symbol = end;
      exp.X_op_symbol = beg;
      exp.X_add_number = 0;
      emit_expr (&exp, addr_size);
    }

  /* Terminating zero pair.  */
  p = frag_more (2 * addr_size);
  md_number_to_chars (p, 0, addr_size);
  md_number_to_chars (p + addr_size, 0, addr_size);

  symbol_set_value_now (aranges_end);
}

/* Emit data for .debug_abbrev.
Note that this must be kept in sync with out_debug_info below. */ static void out_debug_abbrev (segT abbrev_seg, segT info_seg ATTRIBUTE_UNUSED, segT line_seg ATTRIBUTE_UNUSED) { subseg_set (abbrev_seg, 0); out_uleb128 (1); out_uleb128 (DW_TAG_compile_unit); out_byte (DW_CHILDREN_no); if (DWARF2_FORMAT (line_seg) == dwarf2_format_32bit) out_abbrev (DW_AT_stmt_list, DW_FORM_data4); else out_abbrev (DW_AT_stmt_list, DW_FORM_data8); if (all_segs->next == NULL) { out_abbrev (DW_AT_low_pc, DW_FORM_addr); if (DWARF2_VERSION < 4) out_abbrev (DW_AT_high_pc, DW_FORM_addr); else out_abbrev (DW_AT_high_pc, (sizeof_address == 4 ? DW_FORM_data4 : DW_FORM_data8)); } else { if (DWARF2_FORMAT (info_seg) == dwarf2_format_32bit) out_abbrev (DW_AT_ranges, DW_FORM_data4); else out_abbrev (DW_AT_ranges, DW_FORM_data8); } out_abbrev (DW_AT_name, DW_FORM_string); out_abbrev (DW_AT_comp_dir, DW_FORM_string); out_abbrev (DW_AT_producer, DW_FORM_string); out_abbrev (DW_AT_language, DW_FORM_data2); out_abbrev (0, 0); /* Terminate the abbreviations for this compilation unit. */ out_byte (0); } /* Emit a description of this compilation unit for .debug_info. */ static void out_debug_info (segT info_seg, segT abbrev_seg, segT line_seg, segT ranges_seg) { char producer[128]; const char *comp_dir; const char *dirname; expressionS exp; symbolS *info_end; char *p; int len; int sizeof_offset; sizeof_offset = out_header (info_seg, &exp); info_end = exp.X_add_symbol; /* DWARF version. */ out_two (DWARF2_VERSION); /* .debug_abbrev offset */ TC_DWARF2_EMIT_OFFSET (section_symbol (abbrev_seg), sizeof_offset); /* Target address size. */ out_byte (sizeof_address); /* DW_TAG_compile_unit DIE abbrev */ out_uleb128 (1); /* DW_AT_stmt_list */ TC_DWARF2_EMIT_OFFSET (section_symbol (line_seg), (DWARF2_FORMAT (line_seg) == dwarf2_format_32bit ? 4 : 8)); /* These two attributes are emitted if all of the code is contiguous. 
*/ if (all_segs->next == NULL) { /* DW_AT_low_pc */ exp.X_op = O_symbol; exp.X_add_symbol = all_segs->text_start; exp.X_add_number = 0; emit_expr (&exp, sizeof_address); /* DW_AT_high_pc */ if (DWARF2_VERSION < 4) exp.X_op = O_symbol; else { exp.X_op = O_subtract; exp.X_op_symbol = all_segs->text_start; } exp.X_add_symbol = all_segs->text_end; exp.X_add_number = 0; emit_expr (&exp, sizeof_address); } else { /* This attribute is emitted if the code is disjoint. */ /* DW_AT_ranges. */ TC_DWARF2_EMIT_OFFSET (section_symbol (ranges_seg), sizeof_offset); } /* DW_AT_name. We don't have the actual file name that was present on the command line, so assume files[1] is the main input file. We're not supposed to get called unless at least one line number entry was emitted, so this should always be defined. */ if (files_in_use == 0) abort (); if (files[1].dir) { dirname = remap_debug_filename (dirs[files[1].dir]); len = strlen (dirname); #ifdef TE_VMS /* Already has trailing slash. */ p = frag_more (len); memcpy (p, dirname, len); #else p = frag_more (len + 1); memcpy (p, dirname, len); INSERT_DIR_SEPARATOR (p, len); #endif } len = strlen (files[1].filename) + 1; p = frag_more (len); memcpy (p, files[1].filename, len); /* DW_AT_comp_dir */ comp_dir = remap_debug_filename (getpwd ()); len = strlen (comp_dir) + 1; p = frag_more (len); memcpy (p, comp_dir, len); /* DW_AT_producer */ sprintf (producer, "GNU AS %s", VERSION); len = strlen (producer) + 1; p = frag_more (len); memcpy (p, producer, len); /* DW_AT_language. Yes, this is probably not really MIPS, but the dwarf2 draft has no standard code for assembler. */ out_two (DW_LANG_Mips_Assembler); symbol_set_value_now (info_end); } void dwarf2_init (void) { last_seg_ptr = &all_segs; } /* Finish the dwarf2 debug sections. We emit .debug.line if there were any .file/.loc directives, or --gdwarf2 was given, or if the file has a non-empty .debug_info section and an empty .debug_line section. 
   If we emit .debug_line, and the .debug_info section is empty, we
   also emit .debug_info, .debug_aranges and .debug_abbrev.  ALL_SEGS
   will be non-null if there were any .file/.loc directives, or
   --gdwarf2 was given and there were any located instructions
   emitted.  */

void
dwarf2_finish (void)
{
  segT line_seg;
  struct line_seg *s;
  segT info_seg;
  int emit_other_sections = 0;
  int empty_debug_line = 0;

  /* A pre-existing non-empty .debug_info means the user (or compiler)
     supplied their own debug info, so we must not generate ours.  */
  info_seg = bfd_get_section_by_name (stdoutput, ".debug_info");
  emit_other_sections = info_seg == NULL || !seg_not_empty_p (info_seg);

  line_seg = bfd_get_section_by_name (stdoutput, ".debug_line");
  empty_debug_line = line_seg == NULL || !seg_not_empty_p (line_seg);

  /* We can't construct a new debug_line section if we already have one.
     Give an error.  */
  if (all_segs && !empty_debug_line)
    as_fatal ("duplicate .debug_line sections");

  if ((!all_segs && emit_other_sections)
      || (!emit_other_sections && !empty_debug_line))
    /* If there is no line information and no non-empty .debug_info
       section, or if there is both a non-empty .debug_info and a non-empty
       .debug_line, then we do nothing.  */
    return;

  /* Calculate the size of an address for the target machine.  */
  sizeof_address = DWARF2_ADDR_SIZE (stdoutput);

  /* Create and switch to the line number section.  */
  line_seg = subseg_new (".debug_line", 0);
  bfd_set_section_flags (stdoutput, line_seg, SEC_READONLY | SEC_DEBUGGING);

  /* For each subsection, chain the debug entries together.  */
  for (s = all_segs; s; s = s->next)
    {
      struct line_subseg *lss = s->head;
      struct line_entry **ptail = lss->ptail;

      /* Splice each subsequent subsection's entry list onto the tail
	 of the first one, producing a single ordered chain per seg.  */
      while ((lss = lss->next) != NULL)
	{
	  *ptail = lss->head;
	  ptail = lss->ptail;
	}
    }

  out_debug_line (line_seg);

  /* If this is assembler generated line info, and there is no
     debug_info already, we need .debug_info and .debug_abbrev
     sections as well.  */
  if (emit_other_sections)
    {
      segT abbrev_seg;
      segT aranges_seg;
      segT ranges_seg;

      gas_assert (all_segs);

      info_seg = subseg_new (".debug_info", 0);
      abbrev_seg = subseg_new (".debug_abbrev", 0);
      aranges_seg = subseg_new (".debug_aranges", 0);

      bfd_set_section_flags (stdoutput, info_seg,
			     SEC_READONLY | SEC_DEBUGGING);
      bfd_set_section_flags (stdoutput, abbrev_seg,
			     SEC_READONLY | SEC_DEBUGGING);
      bfd_set_section_flags (stdoutput, aranges_seg,
			     SEC_READONLY | SEC_DEBUGGING);

      record_alignment (aranges_seg, ffs (2 * sizeof_address) - 1);

      /* .debug_ranges is only needed when the code is disjoint; a
	 single contiguous seg is described by low_pc/high_pc instead.  */
      if (all_segs->next == NULL)
	ranges_seg = NULL;
      else
	{
	  ranges_seg = subseg_new (".debug_ranges", 0);
	  bfd_set_section_flags (stdoutput, ranges_seg,
				 SEC_READONLY | SEC_DEBUGGING);
	  record_alignment (ranges_seg, ffs (2 * sizeof_address) - 1);
	  out_debug_ranges (ranges_seg);
	}

      out_debug_aranges (aranges_seg, info_seg);
      out_debug_abbrev (abbrev_seg, info_seg, line_seg);
      out_debug_info (info_seg, abbrev_seg, line_seg, ranges_seg);
    }
}
gpl-2.0
ipwndev/DSLinux-Mirror
user/gdb/sim/erc32/exec.c
24
44843
/*
 * This file is part of SIS.
 *
 * SIS, SPARC instruction simulator V1.8 Copyright (C) 1995 Jiri Gaisler,
 * European Space Agency
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 675
 * Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include "sis.h"
#include "end.h"
#include <math.h>
#include <stdio.h>

extern int32 sis_verbose, sparclite;
int ext_irl = 0;

/* Load/store interlock delay */
#define FLSTHOLD 1

/* Load delay (delete if unwanted - speeds up simulation) */
#define LOAD_DEL 1

/* Instruction cycle counts (in cycles) for multi-cycle instructions.  */
#define T_LD 2
#define T_LDD 3
#define T_ST 3
#define T_STD 4
#define T_LDST 4
#define T_JMPL 2
#define T_RETT 2

#define FSR_QNE 0x2000
#define FP_EXE_MODE 0
#define FP_EXC_PE 1
#define FP_EXC_MODE 2

/* FBfcc condition field encodings.
   NOTE(review): FBA is defined twice in the original source (before FBN
   and again after FBU); both definitions are identical (8), so the
   duplicate is harmless.  */
#define FBA 8
#define FBN 0
#define FBNE 1
#define FBLG 2
#define FBUL 3
#define FBL 4
#define FBUG 5
#define FBG 6
#define FBU 7
#define FBA 8
#define FBE 9
#define FBUE 10
#define FBGE 11
#define FBUGE 12
#define FBLE 13
#define FBULE 14
#define FBO 15

/* Floating-point condition codes (FSR fcc field).  */
#define FCC_E 0
#define FCC_L 1
#define FCC_G 2
#define FCC_U 3

/* PSR bit masks.  */
#define PSR_ET 0x20
#define PSR_EF 0x1000
#define PSR_PS 0x40
#define PSR_S 0x80
#define PSR_N 0x0800000
#define PSR_Z 0x0400000
#define PSR_V 0x0200000
#define PSR_C 0x0100000
#define PSR_CC 0x0F00000
#define PSR_CWP 0x7
#define PSR_PIL 0x0f00

/* Integer condition-code extraction; each macro expects a local `icc'
   holding PSR >> 20 and leaves the wanted bit in bit 0.  */
#define ICC_N (icc >> 3)
#define ICC_Z (icc >> 2)
#define ICC_V (icc >> 1)
#define ICC_C (icc)

#define FP_PRES (sregs->fpu_pres)

/* Trap type numbers.  */
#define TRAP_IEXC 1
#define TRAP_UNIMP 2
#define TRAP_PRIVI 3
#define TRAP_FPDIS 4
#define TRAP_WOFL 5
#define TRAP_WUFL 6
#define TRAP_UNALI 7
#define TRAP_FPEXC 8
#define TRAP_DEXC 9
#define TRAP_TAG 10
#define TRAP_DIV0 0x2a

#define FSR_TT 0x1C000
#define FP_IEEE 0x04000
#define FP_UNIMP 0x0C000
#define FP_SEQ_ERR 0x10000

/* Bicc condition field encodings.  */
#define BICC_BN 0
#define BICC_BE 1
#define BICC_BLE 2
#define BICC_BL 3
#define BICC_BLEU 4
#define BICC_BCS 5
#define BICC_NEG 6
#define BICC_BVS 7
#define BICC_BA 8
#define BICC_BNE 9
#define BICC_BG 10
#define BICC_BGE 11
#define BICC_BGU 12
#define BICC_BCC 13
#define BICC_POS 14
#define BICC_BVC 15

/* Instruction field masks.  */
#define INST_SIMM13 0x1fff
#define INST_RS2 0x1f
#define INST_I 0x2000

/* op3 field encodings for format-3 arithmetic/logic instructions.  */
#define ADD 0x00
#define ADDCC 0x10
#define ADDX 0x08
#define ADDXCC 0x18
#define TADDCC 0x20
#define TSUBCC 0x21
#define TADDCCTV 0x22
#define TSUBCCTV 0x23
#define IAND 0x01
#define IANDCC 0x11
#define IANDN 0x05
#define IANDNCC 0x15
#define MULScc 0x24
#define DIVScc 0x1D
#define SMUL 0x0B
#define SMULCC 0x1B
#define UMUL 0x0A
#define UMULCC 0x1A
#define SDIV 0x0F
#define SDIVCC 0x1F
#define UDIV 0x0E
#define UDIVCC 0x1E
#define IOR 0x02
#define IORCC 0x12
#define IORN 0x06
#define IORNCC 0x16
#define SLL 0x25
#define SRA 0x27
#define SRL 0x26
#define SUB 0x04
#define SUBCC 0x14
#define SUBX 0x0C
#define SUBXCC 0x1C
#define IXNOR 0x07
#define IXNORCC 0x17
#define IXOR 0x03
#define IXORCC 0x13

/* op2 field encodings for format-2 instructions.  */
#define SETHI 0x04
#define BICC 0x02
#define FPBCC 0x06

/* op3 encodings for state-register and control-transfer instructions.  */
#define RDY 0x28
#define RDPSR 0x29
#define RDWIM 0x2A
#define RDTBR 0x2B
#define SCAN 0x2C
#define WRY 0x30
#define WRPSR 0x31
#define WRWIM 0x32
#define WRTBR 0x33
#define JMPL 0x38
#define RETT 0x39
#define TICC 0x3A
#define SAVE 0x3C
#define RESTORE 0x3D

/* op3 encodings for load/store instructions.  */
#define LDD 0x03
#define LDDA 0x13
#define LD 0x00
#define LDA 0x10
#define LDF 0x20
#define LDDF 0x23
#define LDSTUB 0x0D
#define LDSTUBA 0x1D
#define LDUB 0x01
#define LDUBA 0x11
#define LDSB 0x09
#define LDSBA 0x19
#define LDUH 0x02
#define LDUHA 0x12
#define LDSH 0x0A
#define LDSHA 0x1A
#define LDFSR 0x21
#define ST 0x04
#define STA 0x14
#define STB 0x05
#define STBA 0x15
#define STD 0x07
#define STDA 0x17
#define STF 0x24
#define STDFQ 0x26
#define STDF 0x27
#define STFSR 0x25
#define STH 0x06
#define STHA 0x16
#define SWAP 0x0F
#define SWAPA 0x1F
#define FLUSH 0x3B

#define SIGN_BIT 0x80000000

/* # of cycles overhead when a trap is taken */
#define TRAP_C 3

/* Forward declarations */

static uint32 sub_cc PARAMS ((uint32 psr, int32 operand1, int32 operand2,
			      int32 result));
static uint32 add_cc PARAMS ((uint32 psr, int32 operand1, int32 operand2,
			      int32 result));
static void log_cc PARAMS ((int32 result, struct pstate *sregs));
static int fpexec PARAMS ((uint32 op3, uint32 rd, uint32 rs1, uint32 rs2,
			   struct pstate *sregs));
static int chk_asi PARAMS ((struct pstate *sregs, uint32 *asi, uint32 op3));

extern struct estate ebase;
extern int32 nfp,ift;

#ifdef ERRINJ
extern uint32 errtt, errftt;
#endif

/* Compute the PSR condition codes for a subtraction
   RESULT = OPERAND1 - OPERAND2 and merge them into PSR.
   N tracks bit 31 of the result; V and C follow the SPARC V8
   subtraction overflow/borrow equations.  */

static uint32
sub_cc(psr, operand1, operand2, result)
    uint32 psr;
    int32 operand1;
    int32 operand2;
    int32 result;
{
  psr = ((psr & ~PSR_N) | ((result >> 8) & PSR_N));
  if (result)
    psr &= ~PSR_Z;
  else
    psr |= PSR_Z;
  psr = (psr & ~PSR_V) | ((((operand1 & ~operand2 & ~result) |
			    (~operand1 & operand2 & result)) >> 10) & PSR_V);
  psr = (psr & ~PSR_C) | ((((~operand1 & operand2) |
			    ((~operand1 | operand2) & result)) >> 11) & PSR_C);
  return (psr);
}

/* Compute the PSR condition codes for an addition
   RESULT = OPERAND1 + OPERAND2 and merge them into PSR.
   NOTE(review): declared `static' in the forward declaration above but
   defined without `static' here; the declarations are inconsistent,
   though the function keeps internal linkage from the earlier
   declaration.  */

uint32
add_cc(psr, operand1, operand2, result)
    uint32 psr;
    int32 operand1;
    int32 operand2;
    int32 result;
{
  psr = ((psr & ~PSR_N) | ((result >> 8) & PSR_N));
  if (result)
    psr &= ~PSR_Z;
  else
    psr |= PSR_Z;
  psr = (psr & ~PSR_V) | ((((operand1 & operand2 & ~result) |
			    (~operand1 & ~operand2 & result)) >> 10) & PSR_V);
  psr = (psr & ~PSR_C) | ((((operand1 & operand2) |
			    ((operand1 | operand2) & ~result)) >> 11) & PSR_C);
  return(psr);
}

/* Set N and Z condition codes from RESULT of a logical operation;
   V and C are cleared.  */

static void
log_cc(result, sregs)
    int32 result;
    struct pstate *sregs;
{
  sregs->psr &= ~(PSR_CC);	/* Zero CC bits */
  sregs->psr = (sregs->psr | ((result >> 8) & PSR_N));
  if (result == 0)
    sregs->psr |= PSR_Z;
}
/* Add two unsigned 32-bit integers, and calculate the carry out. */ static uint32 add32 (uint32 n1, uint32 n2, int *carry) { uint32 result = n1 + n2; *carry = result < n1 || result < n1; return(result); } /* Multiply two 32-bit integers. */ static void mul64 (uint32 n1, uint32 n2, uint32 *result_hi, uint32 *result_lo, int msigned) { uint32 lo, mid1, mid2, hi, reg_lo, reg_hi; int carry; int sign = 0; /* If this is a signed multiply, calculate the sign of the result and make the operands positive. */ if (msigned) { sign = (n1 ^ n2) & SIGN_BIT; if (n1 & SIGN_BIT) n1 = -n1; if (n2 & SIGN_BIT) n2 = -n2; } /* We can split the 32x32 into four 16x16 operations. This ensures that we do not lose precision on 32bit only hosts: */ lo = ((n1 & 0xFFFF) * (n2 & 0xFFFF)); mid1 = ((n1 & 0xFFFF) * ((n2 >> 16) & 0xFFFF)); mid2 = (((n1 >> 16) & 0xFFFF) * (n2 & 0xFFFF)); hi = (((n1 >> 16) & 0xFFFF) * ((n2 >> 16) & 0xFFFF)); /* We now need to add all of these results together, taking care to propogate the carries from the additions: */ reg_lo = add32 (lo, (mid1 << 16), &carry); reg_hi = carry; reg_lo = add32 (reg_lo, (mid2 << 16), &carry); reg_hi += (carry + ((mid1 >> 16) & 0xFFFF) + ((mid2 >> 16) & 0xFFFF) + hi); /* Negate result if necessary. */ if (sign) { reg_hi = ~ reg_hi; reg_lo = - reg_lo; if (reg_lo == 0) reg_hi++; } *result_lo = reg_lo; *result_hi = reg_hi; } /* Divide a 64-bit integer by a 32-bit integer. We cheat and assume that the host compiler supports long long operations. 
*/ static void div64 (uint32 n1_hi, uint32 n1_low, uint32 n2, uint32 *result, int msigned) { uint64 n1; n1 = ((uint64) n1_hi) << 32; n1 |= ((uint64) n1_low) & 0xffffffff; if (msigned) { int64 n1_s = (int64) n1; int32 n2_s = (int32) n2; n1_s = n1_s / n2_s; n1 = (uint64) n1_s; } else n1 = n1 / n2; *result = (uint32) (n1 & 0xffffffff); } int dispatch_instruction(sregs) struct pstate *sregs; { uint32 cwp, op, op2, op3, asi, rd, cond, rs1, rs2; uint32 ldep, icc; int32 operand1, operand2, *rdd, result, eicc, new_cwp; int32 pc, npc, data, address, ws, mexc, fcc; int32 ddata[2]; sregs->ninst++; cwp = ((sregs->psr & PSR_CWP) << 4); op = sregs->inst >> 30; pc = sregs->npc; npc = sregs->npc + 4; op3 = rd = rs1 = operand2 = eicc = 0; rdd = 0; if (op & 2) { op3 = (sregs->inst >> 19) & 0x3f; rs1 = (sregs->inst >> 14) & 0x1f; rd = (sregs->inst >> 25) & 0x1f; #ifdef LOAD_DEL /* Check if load dependecy is possible */ if (ebase.simtime <= sregs->ildtime) ldep = (((op3 & 0x38) != 0x28) && ((op3 & 0x3e) != 0x34) && (sregs->ildreg != 0)); else ldep = 0; if (sregs->inst & INST_I) { if (ldep && (sregs->ildreg == rs1)) sregs->hold++; operand2 = sregs->inst; operand2 = ((operand2 << 19) >> 19); /* sign extend */ } else { rs2 = sregs->inst & INST_RS2; if (rs2 > 7) operand2 = sregs->r[(cwp + rs2) & 0x7f]; else operand2 = sregs->g[rs2]; if (ldep && ((sregs->ildreg == rs1) || (sregs->ildreg == rs2))) sregs->hold++; } #else if (sregs->inst & INST_I) { operand2 = sregs->inst; operand2 = ((operand2 << 19) >> 19); /* sign extend */ } else { rs2 = sregs->inst & INST_RS2; if (rs2 > 7) operand2 = sregs->r[(cwp + rs2) & 0x7f]; else operand2 = sregs->g[rs2]; } #endif if (rd > 7) rdd = &(sregs->r[(cwp + rd) & 0x7f]); else rdd = &(sregs->g[rd]); if (rs1 > 7) rs1 = sregs->r[(cwp + rs1) & 0x7f]; else rs1 = sregs->g[rs1]; } switch (op) { case 0: op2 = (sregs->inst >> 22) & 0x7; switch (op2) { case SETHI: rd = (sregs->inst >> 25) & 0x1f; if (rd > 7) rdd = &(sregs->r[(cwp + rd) & 0x7f]); else rdd = 
&(sregs->g[rd]); *rdd = sregs->inst << 10; break; case BICC: #ifdef STAT sregs->nbranch++; #endif icc = sregs->psr >> 20; cond = ((sregs->inst >> 25) & 0x0f); switch (cond) { case BICC_BN: eicc = 0; break; case BICC_BE: eicc = ICC_Z; break; case BICC_BLE: eicc = ICC_Z | (ICC_N ^ ICC_V); break; case BICC_BL: eicc = (ICC_N ^ ICC_V); break; case BICC_BLEU: eicc = ICC_C | ICC_Z; break; case BICC_BCS: eicc = ICC_C; break; case BICC_NEG: eicc = ICC_N; break; case BICC_BVS: eicc = ICC_V; break; case BICC_BA: eicc = 1; if (sregs->inst & 0x20000000) sregs->annul = 1; break; case BICC_BNE: eicc = ~(ICC_Z); break; case BICC_BG: eicc = ~(ICC_Z | (ICC_N ^ ICC_V)); break; case BICC_BGE: eicc = ~(ICC_N ^ ICC_V); break; case BICC_BGU: eicc = ~(ICC_C | ICC_Z); break; case BICC_BCC: eicc = ~(ICC_C); break; case BICC_POS: eicc = ~(ICC_N); break; case BICC_BVC: eicc = ~(ICC_V); break; } if (eicc & 1) { operand1 = sregs->inst; operand1 = ((operand1 << 10) >> 8); /* sign extend */ npc = sregs->pc + operand1; } else { if (sregs->inst & 0x20000000) sregs->annul = 1; } break; case FPBCC: #ifdef STAT sregs->nbranch++; #endif if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (ebase.simtime < sregs->ftime) { sregs->ftime = ebase.simtime + sregs->hold; } cond = ((sregs->inst >> 25) & 0x0f); fcc = (sregs->fsr >> 10) & 0x3; switch (cond) { case FBN: eicc = 0; break; case FBNE: eicc = (fcc != FCC_E); break; case FBLG: eicc = (fcc == FCC_L) || (fcc == FCC_G); break; case FBUL: eicc = (fcc == FCC_L) || (fcc == FCC_U); break; case FBL: eicc = (fcc == FCC_L); break; case FBUG: eicc = (fcc == FCC_G) || (fcc == FCC_U); break; case FBG: eicc = (fcc == FCC_G); break; case FBU: eicc = (fcc == FCC_U); break; case FBA: eicc = 1; if (sregs->inst & 0x20000000) sregs->annul = 1; break; case FBE: eicc = !(fcc != FCC_E); break; case FBUE: eicc = !((fcc == FCC_L) || (fcc == FCC_G)); break; case FBGE: eicc = !((fcc == FCC_L) || (fcc == FCC_U)); break; case FBUGE: eicc = !(fcc == 
FCC_L); break; case FBLE: eicc = !((fcc == FCC_G) || (fcc == FCC_U)); break; case FBULE: eicc = !(fcc == FCC_G); break; case FBO: eicc = !(fcc == FCC_U); break; } if (eicc) { operand1 = sregs->inst; operand1 = ((operand1 << 10) >> 8); /* sign extend */ npc = sregs->pc + operand1; } else { if (sregs->inst & 0x20000000) sregs->annul = 1; } break; default: sregs->trap = TRAP_UNIMP; break; } break; case 1: /* CALL */ #ifdef STAT sregs->nbranch++; #endif sregs->r[(cwp + 15) & 0x7f] = sregs->pc; npc = sregs->pc + (sregs->inst << 2); break; case 2: if ((op3 >> 1) == 0x1a) { if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; } else { rs1 = (sregs->inst >> 14) & 0x1f; rs2 = sregs->inst & 0x1f; sregs->trap = fpexec(op3, rd, rs1, rs2, sregs); } } else { switch (op3) { case TICC: icc = sregs->psr >> 20; cond = ((sregs->inst >> 25) & 0x0f); switch (cond) { case BICC_BN: eicc = 0; break; case BICC_BE: eicc = ICC_Z; break; case BICC_BLE: eicc = ICC_Z | (ICC_N ^ ICC_V); break; case BICC_BL: eicc = (ICC_N ^ ICC_V); break; case BICC_BLEU: eicc = ICC_C | ICC_Z; break; case BICC_BCS: eicc = ICC_C; break; case BICC_NEG: eicc = ICC_N; break; case BICC_BVS: eicc = ICC_V; break; case BICC_BA: eicc = 1; break; case BICC_BNE: eicc = ~(ICC_Z); break; case BICC_BG: eicc = ~(ICC_Z | (ICC_N ^ ICC_V)); break; case BICC_BGE: eicc = ~(ICC_N ^ ICC_V); break; case BICC_BGU: eicc = ~(ICC_C | ICC_Z); break; case BICC_BCC: eicc = ~(ICC_C); break; case BICC_POS: eicc = ~(ICC_N); break; case BICC_BVC: eicc = ~(ICC_V); break; } if (eicc & 1) { sregs->trap = (0x80 | ((rs1 + operand2) & 0x7f)); } break; case MULScc: operand1 = (((sregs->psr & PSR_V) ^ ((sregs->psr & PSR_N) >> 2)) << 10) | (rs1 >> 1); if ((sregs->y & 1) == 0) operand2 = 0; *rdd = operand1 + operand2; sregs->y = (rs1 << 31) | (sregs->y >> 1); sregs->psr = add_cc(sregs->psr, operand1, operand2, *rdd); break; case DIVScc: { int sign; uint32 result, remainder; int c0, y31; if (!sparclite) { sregs->trap = TRAP_UNIMP; break; } 
sign = ((sregs->psr & PSR_V) != 0) ^ ((sregs->psr & PSR_N) != 0); remainder = (sregs->y << 1) | (rs1 >> 31); /* If true sign is positive, calculate remainder - divisor. Otherwise, calculate remainder + divisor. */ if (sign == 0) operand2 = ~operand2 + 1; result = remainder + operand2; /* The SPARClite User's Manual is not clear on how the "carry out" of the above ALU operation is to be calculated. From trial and error tests on the the chip itself, it appears that it is a normal addition carry, and not a subtraction borrow, even in cases where the divisor is subtracted from the remainder. FIXME: get the true story from Fujitsu. */ c0 = result < (uint32) remainder || result < (uint32) operand2; if (result & 0x80000000) sregs->psr |= PSR_N; else sregs->psr &= ~PSR_N; y31 = (sregs->y & 0x80000000) == 0x80000000; if (result == 0 && sign == y31) sregs->psr |= PSR_Z; else sregs->psr &= ~PSR_Z; sign = (sign && !y31) || (!c0 && (sign || !y31)); if (sign ^ (result >> 31)) sregs->psr |= PSR_V; else sregs->psr &= ~PSR_V; if (!sign) sregs->psr |= PSR_C; else sregs->psr &= ~PSR_C; sregs->y = result; if (rd != 0) *rdd = (rs1 << 1) | !sign; } break; case SMUL: { mul64 (rs1, operand2, &sregs->y, rdd, 1); } break; case SMULCC: { uint32 result; mul64 (rs1, operand2, &sregs->y, &result, 1); if (result & 0x80000000) sregs->psr |= PSR_N; else sregs->psr &= ~PSR_N; if (result == 0) sregs->psr |= PSR_Z; else sregs->psr &= ~PSR_Z; *rdd = result; } break; case UMUL: { mul64 (rs1, operand2, &sregs->y, rdd, 0); } break; case UMULCC: { uint32 result; mul64 (rs1, operand2, &sregs->y, &result, 0); if (result & 0x80000000) sregs->psr |= PSR_N; else sregs->psr &= ~PSR_N; if (result == 0) sregs->psr |= PSR_Z; else sregs->psr &= ~PSR_Z; *rdd = result; } break; case SDIV: { if (sparclite) { sregs->trap = TRAP_UNIMP; break; } if (operand2 == 0) { sregs->trap = TRAP_DIV0; break; } div64 (sregs->y, rs1, operand2, rdd, 1); } break; case SDIVCC: { uint32 result; if (sparclite) { sregs->trap = TRAP_UNIMP; 
break; } if (operand2 == 0) { sregs->trap = TRAP_DIV0; break; } div64 (sregs->y, rs1, operand2, &result, 1); if (result & 0x80000000) sregs->psr |= PSR_N; else sregs->psr &= ~PSR_N; if (result == 0) sregs->psr |= PSR_Z; else sregs->psr &= ~PSR_Z; /* FIXME: should set overflow flag correctly. */ sregs->psr &= ~(PSR_C | PSR_V); *rdd = result; } break; case UDIV: { if (sparclite) { sregs->trap = TRAP_UNIMP; break; } if (operand2 == 0) { sregs->trap = TRAP_DIV0; break; } div64 (sregs->y, rs1, operand2, rdd, 0); } break; case UDIVCC: { uint32 result; if (sparclite) { sregs->trap = TRAP_UNIMP; break; } if (operand2 == 0) { sregs->trap = TRAP_DIV0; break; } div64 (sregs->y, rs1, operand2, &result, 0); if (result & 0x80000000) sregs->psr |= PSR_N; else sregs->psr &= ~PSR_N; if (result == 0) sregs->psr |= PSR_Z; else sregs->psr &= ~PSR_Z; /* FIXME: should set overflow flag correctly. */ sregs->psr &= ~(PSR_C | PSR_V); *rdd = result; } break; case IXNOR: *rdd = rs1 ^ ~operand2; break; case IXNORCC: *rdd = rs1 ^ ~operand2; log_cc(*rdd, sregs); break; case IXOR: *rdd = rs1 ^ operand2; break; case IXORCC: *rdd = rs1 ^ operand2; log_cc(*rdd, sregs); break; case IOR: *rdd = rs1 | operand2; break; case IORCC: *rdd = rs1 | operand2; log_cc(*rdd, sregs); break; case IORN: *rdd = rs1 | ~operand2; break; case IORNCC: *rdd = rs1 | ~operand2; log_cc(*rdd, sregs); break; case IANDNCC: *rdd = rs1 & ~operand2; log_cc(*rdd, sregs); break; case IANDN: *rdd = rs1 & ~operand2; break; case IAND: *rdd = rs1 & operand2; break; case IANDCC: *rdd = rs1 & operand2; log_cc(*rdd, sregs); break; case SUB: *rdd = rs1 - operand2; break; case SUBCC: *rdd = rs1 - operand2; sregs->psr = sub_cc(sregs->psr, rs1, operand2, *rdd); break; case SUBX: *rdd = rs1 - operand2 - ((sregs->psr >> 20) & 1); break; case SUBXCC: *rdd = rs1 - operand2 - ((sregs->psr >> 20) & 1); sregs->psr = sub_cc(sregs->psr, rs1, operand2, *rdd); break; case ADD: *rdd = rs1 + operand2; break; case ADDCC: *rdd = rs1 + operand2; sregs->psr 
= add_cc(sregs->psr, rs1, operand2, *rdd); break; case ADDX: *rdd = rs1 + operand2 + ((sregs->psr >> 20) & 1); break; case ADDXCC: *rdd = rs1 + operand2 + ((sregs->psr >> 20) & 1); sregs->psr = add_cc(sregs->psr, rs1, operand2, *rdd); break; case TADDCC: *rdd = rs1 + operand2; sregs->psr = add_cc(sregs->psr, rs1, operand2, *rdd); if ((rs1 | operand2) & 0x3) sregs->psr |= PSR_V; break; case TSUBCC: *rdd = rs1 - operand2; sregs->psr = sub_cc (sregs->psr, rs1, operand2, *rdd); if ((rs1 | operand2) & 0x3) sregs->psr |= PSR_V; break; case TADDCCTV: *rdd = rs1 + operand2; result = add_cc(0, rs1, operand2, *rdd); if ((rs1 | operand2) & 0x3) result |= PSR_V; if (result & PSR_V) { sregs->trap = TRAP_TAG; } else { sregs->psr = (sregs->psr & ~PSR_CC) | result; } break; case TSUBCCTV: *rdd = rs1 - operand2; result = add_cc (0, rs1, operand2, *rdd); if ((rs1 | operand2) & 0x3) result |= PSR_V; if (result & PSR_V) { sregs->trap = TRAP_TAG; } else { sregs->psr = (sregs->psr & ~PSR_CC) | result; } break; case SLL: *rdd = rs1 << (operand2 & 0x1f); break; case SRL: *rdd = rs1 >> (operand2 & 0x1f); break; case SRA: *rdd = ((int) rs1) >> (operand2 & 0x1f); break; case FLUSH: if (ift) sregs->trap = TRAP_UNIMP; break; case SAVE: new_cwp = ((sregs->psr & PSR_CWP) - 1) & PSR_CWP; if (sregs->wim & (1 << new_cwp)) { sregs->trap = TRAP_WOFL; break; } if (rd > 7) rdd = &(sregs->r[((new_cwp << 4) + rd) & 0x7f]); *rdd = rs1 + operand2; sregs->psr = (sregs->psr & ~PSR_CWP) | new_cwp; break; case RESTORE: new_cwp = ((sregs->psr & PSR_CWP) + 1) & PSR_CWP; if (sregs->wim & (1 << new_cwp)) { sregs->trap = TRAP_WUFL; break; } if (rd > 7) rdd = &(sregs->r[((new_cwp << 4) + rd) & 0x7f]); *rdd = rs1 + operand2; sregs->psr = (sregs->psr & ~PSR_CWP) | new_cwp; break; case RDPSR: if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } *rdd = sregs->psr; break; case RDY: if (!sparclite) *rdd = sregs->y; else { int rs1_is_asr = (sregs->inst >> 14) & 0x1f; if ( 0 == rs1_is_asr ) *rdd = sregs->y; else 
if ( 17 == rs1_is_asr ) *rdd = sregs->asr17; else { sregs->trap = TRAP_UNIMP; break; } } break; case RDWIM: if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } *rdd = sregs->wim; break; case RDTBR: if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } *rdd = sregs->tbr; break; case WRPSR: if ((sregs->psr & 0x1f) > 7) { sregs->trap = TRAP_UNIMP; break; } if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } sregs->psr = (rs1 ^ operand2) & 0x00f03fff; break; case WRWIM: if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } sregs->wim = (rs1 ^ operand2) & 0x0ff; break; case WRTBR: if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } sregs->tbr = (sregs->tbr & 0x00000ff0) | ((rs1 ^ operand2) & 0xfffff000); break; case WRY: if (!sparclite) sregs->y = (rs1 ^ operand2); else { if ( 0 == rd ) sregs->y = (rs1 ^ operand2); else if ( 17 == rd ) sregs->asr17 = (rs1 ^ operand2); else { sregs->trap = TRAP_UNIMP; break; } } break; case JMPL: #ifdef STAT sregs->nbranch++; #endif sregs->icnt = T_JMPL; /* JMPL takes two cycles */ if (rs1 & 0x3) { sregs->trap = TRAP_UNALI; break; } *rdd = sregs->pc; npc = rs1 + operand2; break; case RETT: address = rs1 + operand2; new_cwp = ((sregs->psr & PSR_CWP) + 1) & PSR_CWP; sregs->icnt = T_RETT; /* RETT takes two cycles */ if (sregs->psr & PSR_ET) { sregs->trap = TRAP_UNIMP; break; } if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; break; } if (sregs->wim & (1 << new_cwp)) { sregs->trap = TRAP_WUFL; break; } if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } sregs->psr = (sregs->psr & ~PSR_CWP) | new_cwp | PSR_ET; sregs->psr = (sregs->psr & ~PSR_S) | ((sregs->psr & PSR_PS) << 1); npc = address; break; case SCAN: { uint32 result, mask; int i; if (!sparclite) { sregs->trap = TRAP_UNIMP; break; } mask = (operand2 & 0x80000000) | (operand2 >> 1); result = rs1 ^ mask; for (i = 0; i < 32; i++) { if (result & 0x80000000) break; result <<= 1; } *rdd = i == 32 ? 
63 : i; } break; default: sregs->trap = TRAP_UNIMP; break; } } break; case 3: /* Load/store instructions */ address = rs1 + operand2; if (sregs->psr & PSR_S) asi = 11; else asi = 10; if (op3 & 4) { sregs->icnt = T_ST; /* Set store instruction count */ #ifdef STAT sregs->nstore++; #endif } else { sregs->icnt = T_LD; /* Set load instruction count */ #ifdef STAT sregs->nload++; #endif } /* Decode load/store instructions */ switch (op3) { case LDDA: if (!chk_asi(sregs, &asi, op3)) break; case LDD: if (address & 0x7) { sregs->trap = TRAP_UNALI; break; } if (rd & 1) { rd &= 0x1e; if (rd > 7) rdd = &(sregs->r[(cwp + rd) & 0x7f]); else rdd = &(sregs->g[rd]); } mexc = memory_read(asi, address, ddata, 3, &ws); sregs->hold += ws * 2; sregs->icnt = T_LDD; if (mexc) { sregs->trap = TRAP_DEXC; } else { rdd[0] = ddata[0]; rdd[1] = ddata[1]; #ifdef STAT sregs->nload++; /* Double load counts twice */ #endif } break; case LDA: if (!chk_asi(sregs, &asi, op3)) break; case LD: if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } mexc = memory_read(asi, address, &data, 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } else { *rdd = data; } break; case LDSTUBA: if (!chk_asi(sregs, &asi, op3)) break; case LDSTUB: mexc = memory_read(asi, address, &data, 0, &ws); sregs->hold += ws; sregs->icnt = T_LDST; if (mexc) { sregs->trap = TRAP_DEXC; break; } *rdd = data; data = 0x0ff; mexc = memory_write(asi, address, &data, 0, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } #ifdef STAT sregs->nload++; #endif break; case LDSBA: case LDUBA: if (!chk_asi(sregs, &asi, op3)) break; case LDSB: case LDUB: mexc = memory_read(asi, address, &data, 0, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; break; } if ((op3 == LDSB) && (data & 0x80)) data |= 0xffffff00; *rdd = data; break; case LDSHA: case LDUHA: if (!chk_asi(sregs, &asi, op3)) break; case LDSH: case LDUH: if (address & 0x1) { sregs->trap = TRAP_UNALI; break; } mexc = memory_read(asi, address, &data, 
1, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; break; } if ((op3 == LDSH) && (data & 0x8000)) data |= 0xffff0000; *rdd = data; break; case LDF: if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } if (ebase.simtime < sregs->ftime) { if ((sregs->frd == rd) || (sregs->frs1 == rd) || (sregs->frs2 == rd)) sregs->fhold += (sregs->ftime - ebase.simtime); } mexc = memory_read(asi, address, &data, 2, &ws); sregs->hold += ws; sregs->flrd = rd; sregs->ltime = ebase.simtime + sregs->icnt + FLSTHOLD + sregs->hold + sregs->fhold; if (mexc) { sregs->trap = TRAP_DEXC; } else { sregs->fs[rd] = *((float32 *) & data); } break; case LDDF: if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (address & 0x7) { sregs->trap = TRAP_UNALI; break; } if (ebase.simtime < sregs->ftime) { if (((sregs->frd >> 1) == (rd >> 1)) || ((sregs->frs1 >> 1) == (rd >> 1)) || ((sregs->frs2 >> 1) == (rd >> 1))) sregs->fhold += (sregs->ftime - ebase.simtime); } mexc = memory_read(asi, address, ddata, 3, &ws); sregs->hold += ws * 2; sregs->icnt = T_LDD; if (mexc) { sregs->trap = TRAP_DEXC; } else { rd &= 0x1E; sregs->flrd = rd; sregs->fs[rd] = *((float32 *) & ddata[0]); #ifdef STAT sregs->nload++; /* Double load counts twice */ #endif sregs->fs[rd + 1] = *((float32 *) & ddata[1]); sregs->ltime = ebase.simtime + sregs->icnt + FLSTHOLD + sregs->hold + sregs->fhold; } break; case LDFSR: if (ebase.simtime < sregs->ftime) { sregs->fhold += (sregs->ftime - ebase.simtime); } if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } mexc = memory_read(asi, address, &data, 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } else { sregs->fsr = (sregs->fsr & 0x7FF000) | (data & ~0x7FF000); set_fsr(sregs->fsr); } break; case STFSR: if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; 
} if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } if (ebase.simtime < sregs->ftime) { sregs->fhold += (sregs->ftime - ebase.simtime); } mexc = memory_write(asi, address, &sregs->fsr, 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } break; case STA: if (!chk_asi(sregs, &asi, op3)) break; case ST: if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } mexc = memory_write(asi, address, rdd, 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } break; case STBA: if (!chk_asi(sregs, &asi, op3)) break; case STB: mexc = memory_write(asi, address, rdd, 0, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } break; case STDA: if (!chk_asi(sregs, &asi, op3)) break; case STD: if (address & 0x7) { sregs->trap = TRAP_UNALI; break; } if (rd & 1) { rd &= 0x1e; if (rd > 7) rdd = &(sregs->r[(cwp + rd) & 0x7f]); else rdd = &(sregs->g[rd]); } mexc = memory_write(asi, address, rdd, 3, &ws); sregs->hold += ws; sregs->icnt = T_STD; #ifdef STAT sregs->nstore++; /* Double store counts twice */ #endif if (mexc) { sregs->trap = TRAP_DEXC; break; } break; case STDFQ: if ((sregs->psr & 0x1f) > 7) { sregs->trap = TRAP_UNIMP; break; } if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (address & 0x7) { sregs->trap = TRAP_UNALI; break; } if (!(sregs->fsr & FSR_QNE)) { sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_SEQ_ERR; break; } rdd = &(sregs->fpq[0]); mexc = memory_write(asi, address, rdd, 3, &ws); sregs->hold += ws; sregs->icnt = T_STD; #ifdef STAT sregs->nstore++; /* Double store counts twice */ #endif if (mexc) { sregs->trap = TRAP_DEXC; break; } else { sregs->fsr &= ~FSR_QNE; sregs->fpstate = FP_EXE_MODE; } break; case STHA: if (!chk_asi(sregs, &asi, op3)) break; case STH: if (address & 0x1) { sregs->trap = TRAP_UNALI; break; } mexc = memory_write(asi, address, rdd, 1, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } break; case STF: if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = 
TRAP_FPDIS; break; } if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } if (ebase.simtime < sregs->ftime) { if (sregs->frd == rd) sregs->fhold += (sregs->ftime - ebase.simtime); } mexc = memory_write(asi, address, &sregs->fsi[rd], 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; } break; case STDF: if (!((sregs->psr & PSR_EF) && FP_PRES)) { sregs->trap = TRAP_FPDIS; break; } if (address & 0x7) { sregs->trap = TRAP_UNALI; break; } rd &= 0x1E; if (ebase.simtime < sregs->ftime) { if ((sregs->frd == rd) || (sregs->frd + 1 == rd)) sregs->fhold += (sregs->ftime - ebase.simtime); } mexc = memory_write(asi, address, &sregs->fsi[rd], 3, &ws); sregs->hold += ws; sregs->icnt = T_STD; #ifdef STAT sregs->nstore++; /* Double store counts twice */ #endif if (mexc) { sregs->trap = TRAP_DEXC; } break; case SWAPA: if (!chk_asi(sregs, &asi, op3)) break; case SWAP: if (address & 0x3) { sregs->trap = TRAP_UNALI; break; } mexc = memory_read(asi, address, &data, 2, &ws); sregs->hold += ws; if (mexc) { sregs->trap = TRAP_DEXC; break; } mexc = memory_write(asi, address, rdd, 2, &ws); sregs->hold += ws; sregs->icnt = T_LDST; if (mexc) { sregs->trap = TRAP_DEXC; break; } else *rdd = data; #ifdef STAT sregs->nload++; #endif break; default: sregs->trap = TRAP_UNIMP; break; } #ifdef LOAD_DEL if (!(op3 & 4)) { sregs->ildtime = ebase.simtime + sregs->hold + sregs->icnt; sregs->ildreg = rd; if ((op3 | 0x10) == 0x13) sregs->ildreg |= 1; /* Double load, odd register loaded * last */ } #endif break; default: sregs->trap = TRAP_UNIMP; break; } sregs->g[0] = 0; if (!sregs->trap) { sregs->pc = pc; sregs->npc = npc; } return (0); } #define T_FABSs 2 #define T_FADDs 4 #define T_FADDd 4 #define T_FCMPs 4 #define T_FCMPd 4 #define T_FDIVs 20 #define T_FDIVd 35 #define T_FMOVs 2 #define T_FMULs 5 #define T_FMULd 9 #define T_FNEGs 2 #define T_FSQRTs 37 #define T_FSQRTd 65 #define T_FSUBs 4 #define T_FSUBd 4 #define T_FdTOi 7 #define T_FdTOs 3 #define T_FiTOs 6 #define T_FiTOd 6 #define 
T_FsTOi 6 #define T_FsTOd 2 #define FABSs 0x09 #define FADDs 0x41 #define FADDd 0x42 #define FCMPs 0x51 #define FCMPd 0x52 #define FCMPEs 0x55 #define FCMPEd 0x56 #define FDIVs 0x4D #define FDIVd 0x4E #define FMOVs 0x01 #define FMULs 0x49 #define FMULd 0x4A #define FNEGs 0x05 #define FSQRTs 0x29 #define FSQRTd 0x2A #define FSUBs 0x45 #define FSUBd 0x46 #define FdTOi 0xD2 #define FdTOs 0xC6 #define FiTOs 0xC4 #define FiTOd 0xC8 #define FsTOi 0xD1 #define FsTOd 0xC9 static int fpexec(op3, rd, rs1, rs2, sregs) uint32 op3, rd, rs1, rs2; struct pstate *sregs; { uint32 opf, tem, accex; int32 fcc; uint32 ldadj; if (sregs->fpstate == FP_EXC_MODE) { sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_SEQ_ERR; sregs->fpstate = FP_EXC_PE; return (0); } if (sregs->fpstate == FP_EXC_PE) { sregs->fpstate = FP_EXC_MODE; return (TRAP_FPEXC); } opf = (sregs->inst >> 5) & 0x1ff; /* * Check if we already have an FPop in the pipe. If so, halt until it is * finished by incrementing fhold with the remaining execution time */ if (ebase.simtime < sregs->ftime) { sregs->fhold = (sregs->ftime - ebase.simtime); } else { sregs->fhold = 0; /* Check load dependencies. */ if (ebase.simtime < sregs->ltime) { /* Don't check rs1 if single operand instructions */ if (((opf >> 6) == 0) || ((opf >> 6) == 3)) rs1 = 32; /* Adjust for double floats */ ldadj = opf & 1; if (!(((sregs->flrd - rs1) >> ldadj) && ((sregs->flrd - rs2) >> ldadj))) sregs->fhold++; } } sregs->finst++; sregs->frs1 = rs1; /* Store src and dst for dependecy check */ sregs->frs2 = rs2; sregs->frd = rd; sregs->ftime = ebase.simtime + sregs->hold + sregs->fhold; /* SPARC is big-endian - swap double floats if host is little-endian */ /* This is ugly - I know ... */ /* FIXME: should use (CURRENT_HOST_BYTE_ORDER == CURRENT_TARGET_BYTE_ORDER) but what about machines where float values are different endianness from integer values? 
*/ #ifdef HOST_LITTLE_ENDIAN_FLOAT rs1 &= 0x1f; switch (opf) { case FADDd: case FDIVd: case FMULd: case FSQRTd: case FSUBd: case FCMPd: case FCMPEd: case FdTOi: case FdTOs: sregs->fdp[rs1 | 1] = sregs->fs[rs1 & ~1]; sregs->fdp[rs1 & ~1] = sregs->fs[rs1 | 1]; sregs->fdp[rs2 | 1] = sregs->fs[rs2 & ~1]; sregs->fdp[rs2 & ~1] = sregs->fs[rs2 | 1]; default: ; } #endif clear_accex(); switch (opf) { case FABSs: sregs->fs[rd] = fabs(sregs->fs[rs2]); sregs->ftime += T_FABSs; sregs->frs1 = 32; /* rs1 ignored */ break; case FADDs: sregs->fs[rd] = sregs->fs[rs1] + sregs->fs[rs2]; sregs->ftime += T_FADDs; break; case FADDd: sregs->fd[rd >> 1] = sregs->fd[rs1 >> 1] + sregs->fd[rs2 >> 1]; sregs->ftime += T_FADDd; break; case FCMPs: case FCMPEs: if (sregs->fs[rs1] == sregs->fs[rs2]) fcc = 3; else if (sregs->fs[rs1] < sregs->fs[rs2]) fcc = 2; else if (sregs->fs[rs1] > sregs->fs[rs2]) fcc = 1; else fcc = 0; sregs->fsr |= 0x0C00; sregs->fsr &= ~(fcc << 10); sregs->ftime += T_FCMPs; sregs->frd = 32; /* rd ignored */ if ((fcc == 0) && (opf == FCMPEs)) { sregs->fpstate = FP_EXC_PE; sregs->fsr = (sregs->fsr & ~0x1C000) | (1 << 14); } break; case FCMPd: case FCMPEd: if (sregs->fd[rs1 >> 1] == sregs->fd[rs2 >> 1]) fcc = 3; else if (sregs->fd[rs1 >> 1] < sregs->fd[rs2 >> 1]) fcc = 2; else if (sregs->fd[rs1 >> 1] > sregs->fd[rs2 >> 1]) fcc = 1; else fcc = 0; sregs->fsr |= 0x0C00; sregs->fsr &= ~(fcc << 10); sregs->ftime += T_FCMPd; sregs->frd = 32; /* rd ignored */ if ((fcc == 0) && (opf == FCMPEd)) { sregs->fpstate = FP_EXC_PE; sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_IEEE; } break; case FDIVs: sregs->fs[rd] = sregs->fs[rs1] / sregs->fs[rs2]; sregs->ftime += T_FDIVs; break; case FDIVd: sregs->fd[rd >> 1] = sregs->fd[rs1 >> 1] / sregs->fd[rs2 >> 1]; sregs->ftime += T_FDIVd; break; case FMOVs: sregs->fs[rd] = sregs->fs[rs2]; sregs->ftime += T_FMOVs; sregs->frs1 = 32; /* rs1 ignored */ break; case FMULs: sregs->fs[rd] = sregs->fs[rs1] * sregs->fs[rs2]; sregs->ftime += T_FMULs; break; case 
FMULd: sregs->fd[rd >> 1] = sregs->fd[rs1 >> 1] * sregs->fd[rs2 >> 1]; sregs->ftime += T_FMULd; break; case FNEGs: sregs->fs[rd] = -sregs->fs[rs2]; sregs->ftime += T_FNEGs; sregs->frs1 = 32; /* rs1 ignored */ break; case FSQRTs: if (sregs->fs[rs2] < 0.0) { sregs->fpstate = FP_EXC_PE; sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_IEEE; sregs->fsr = (sregs->fsr & 0x1f) | 0x10; break; } sregs->fs[rd] = sqrt(sregs->fs[rs2]); sregs->ftime += T_FSQRTs; sregs->frs1 = 32; /* rs1 ignored */ break; case FSQRTd: if (sregs->fd[rs2 >> 1] < 0.0) { sregs->fpstate = FP_EXC_PE; sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_IEEE; sregs->fsr = (sregs->fsr & 0x1f) | 0x10; break; } sregs->fd[rd >> 1] = sqrt(sregs->fd[rs2 >> 1]); sregs->ftime += T_FSQRTd; sregs->frs1 = 32; /* rs1 ignored */ break; case FSUBs: sregs->fs[rd] = sregs->fs[rs1] - sregs->fs[rs2]; sregs->ftime += T_FSUBs; break; case FSUBd: sregs->fd[rd >> 1] = sregs->fd[rs1 >> 1] - sregs->fd[rs2 >> 1]; sregs->ftime += T_FSUBd; break; case FdTOi: sregs->fsi[rd] = (int) sregs->fd[rs2 >> 1]; sregs->ftime += T_FdTOi; sregs->frs1 = 32; /* rs1 ignored */ break; case FdTOs: sregs->fs[rd] = (float32) sregs->fd[rs2 >> 1]; sregs->ftime += T_FdTOs; sregs->frs1 = 32; /* rs1 ignored */ break; case FiTOs: sregs->fs[rd] = (float32) sregs->fsi[rs2]; sregs->ftime += T_FiTOs; sregs->frs1 = 32; /* rs1 ignored */ break; case FiTOd: sregs->fd[rd >> 1] = (float64) sregs->fsi[rs2]; sregs->ftime += T_FiTOd; sregs->frs1 = 32; /* rs1 ignored */ break; case FsTOi: sregs->fsi[rd] = (int) sregs->fs[rs2]; sregs->ftime += T_FsTOi; sregs->frs1 = 32; /* rs1 ignored */ break; case FsTOd: sregs->fd[rd >> 1] = sregs->fs[rs2]; sregs->ftime += T_FsTOd; sregs->frs1 = 32; /* rs1 ignored */ break; default: sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_UNIMP; sregs->fpstate = FP_EXC_PE; } #ifdef ERRINJ if (errftt) { sregs->fsr = (sregs->fsr & ~FSR_TT) | (errftt << 14); sregs->fpstate = FP_EXC_PE; if (sis_verbose) printf("Inserted fpu error %X\n",errftt); errftt = 0; } #endif accex 
= get_accex(); #ifdef HOST_LITTLE_ENDIAN_FLOAT switch (opf) { case FADDd: case FDIVd: case FMULd: case FSQRTd: case FSUBd: case FiTOd: case FsTOd: sregs->fs[rd & ~1] = sregs->fdp[rd | 1]; sregs->fs[rd | 1] = sregs->fdp[rd & ~1]; default: ; } #endif if (sregs->fpstate == FP_EXC_PE) { sregs->fpq[0] = sregs->pc; sregs->fpq[1] = sregs->inst; sregs->fsr |= FSR_QNE; } else { tem = (sregs->fsr >> 23) & 0x1f; if (tem & accex) { sregs->fpstate = FP_EXC_PE; sregs->fsr = (sregs->fsr & ~FSR_TT) | FP_IEEE; sregs->fsr = ((sregs->fsr & ~0x1f) | accex); } else { sregs->fsr = ((((sregs->fsr >> 5) | accex) << 5) | accex); } if (sregs->fpstate == FP_EXC_PE) { sregs->fpq[0] = sregs->pc; sregs->fpq[1] = sregs->inst; sregs->fsr |= FSR_QNE; } } clear_accex(); return (0); } static int chk_asi(sregs, asi, op3) struct pstate *sregs; uint32 *asi, op3; { if (!(sregs->psr & PSR_S)) { sregs->trap = TRAP_PRIVI; return (0); } else if (sregs->inst & INST_I) { sregs->trap = TRAP_UNIMP; return (0); } else *asi = (sregs->inst >> 5) & 0x0ff; return(1); } int execute_trap(sregs) struct pstate *sregs; { int32 cwp; if (sregs->trap == 256) { sregs->pc = 0; sregs->npc = 4; sregs->trap = 0; } else if (sregs->trap == 257) { return (ERROR); } else { if ((sregs->psr & PSR_ET) == 0) return (ERROR); sregs->tbr = (sregs->tbr & 0xfffff000) | (sregs->trap << 4); sregs->trap = 0; sregs->psr &= ~PSR_ET; sregs->psr |= ((sregs->psr & PSR_S) >> 1); sregs->annul = 0; sregs->psr = (((sregs->psr & PSR_CWP) - 1) & 0x7) | (sregs->psr & ~PSR_CWP); cwp = ((sregs->psr & PSR_CWP) << 4); sregs->r[(cwp + 17) & 0x7f] = sregs->pc; sregs->r[(cwp + 18) & 0x7f] = sregs->npc; sregs->psr |= PSR_S; sregs->pc = sregs->tbr; sregs->npc = sregs->tbr + 4; if ( 0 != (1 & sregs->asr17) ) { /* single vector trapping! 
*/ sregs->pc = sregs->tbr & 0xfffff000; sregs->npc = sregs->pc + 4; } /* Increase simulator time */ sregs->icnt = TRAP_C; } return (0); } extern struct irqcell irqarr[16]; int check_interrupts(sregs) struct pstate *sregs; { #ifdef ERRINJ if (errtt) { sregs->trap = errtt; if (sis_verbose) printf("Inserted error trap 0x%02X\n",errtt); errtt = 0; } #endif if ((ext_irl) && (sregs->psr & PSR_ET) && ((ext_irl == 15) || (ext_irl > (int) ((sregs->psr & PSR_PIL) >> 8)))) { if (sregs->trap == 0) { sregs->trap = 16 + ext_irl; irqarr[ext_irl & 0x0f].callback(irqarr[ext_irl & 0x0f].arg); return(1); } } return(0); } void init_regs(sregs) struct pstate *sregs; { sregs->pc = 0; sregs->npc = 4; sregs->trap = 0; sregs->psr &= 0x00f03fdf; sregs->psr |= 0x080; /* Set supervisor bit */ sregs->breakpoint = 0; sregs->annul = 0; sregs->fpstate = FP_EXE_MODE; sregs->fpqn = 0; sregs->ftime = 0; sregs->ltime = 0; sregs->err_mode = 0; ext_irl = 0; sregs->g[0] = 0; #ifdef HOST_LITTLE_ENDIAN_FLOAT sregs->fdp = (float32 *) sregs->fd; sregs->fsi = (int32 *) sregs->fs; #else sregs->fs = (float32 *) sregs->fd; sregs->fsi = (int32 *) sregs->fd; #endif sregs->fsr = 0; sregs->fpu_pres = !nfp; set_fsr(sregs->fsr); sregs->bphit = 0; sregs->ildreg = 0; sregs->ildtime = 0; sregs->y = 0; sregs->asr17 = 0; sregs->rett_err = 0; sregs->jmpltime = 0; }
gpl-2.0
talexop/talexop_kernel_Note4_910C_5.1.1
drivers/misc/dmverity_query.c
24
3572
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/highmem.h> #if defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS5430) #if defined(__GNUC__) && \ defined(__GNUC_MINOR__) && \ defined(__GNUC_PATCHLEVEL__) && \ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \ >= 40502 #define MC_ARCH_EXTENSION_SEC #endif static inline u32 exynos_smc_verity(u32 cmd, u32 arg1, u32 arg2, u32 arg3) { register u32 reg0 __asm__("r0") = cmd; register u32 reg1 __asm__("r1") = arg1; register u32 reg2 __asm__("r2") = arg2; register u32 reg3 __asm__("r3") = arg3; __asm__ volatile ( #ifdef MC_ARCH_EXTENSION_SEC /* This pseudo op is supported and required from * binutils 2.21 on */ ".arch_extension sec\n" #endif "smc 0\n" : "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3) ); return reg1; } #endif static int verity_scm_call(void) { #if defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS5430) #define CMD_READ_SYSTEM_IMAGE_CHECK_STATUS 3 return exynos_smc_verity(0x83000006, CMD_READ_SYSTEM_IMAGE_CHECK_STATUS, 0, 0); #else /* for QC */ int ret; struct { uint32_t cmd_id; uint32_t arg; } dmverity_data; uint32_t rsp = 4, rsp_len = 4; dmverity_data.cmd_id = 3; /* Read */ dmverity_data.arg = 0; ret = scm_call(245, 1, &dmverity_data, sizeof(dmverity_data), &rsp, rsp_len); if (0 != ret) return -1; else return rsp; #endif } #define DRIVER_DESC "Read whether odin flash succeeded" ssize_t dmverity_read(struct file *filep, char __user *buf, size_t size, loff_t *offset) { uint32_t odin_flag; //int ret; /* First check is to get rid of integer overflow exploits */ if (size < sizeof(uint32_t)) { printk(KERN_ERR"Size must be atleast %d\n", sizeof(uint32_t)); return -EINVAL; } odin_flag = verity_scm_call(); printk(KERN_INFO"dmverity: odin flag: %x\n", odin_flag); if (copy_to_user(buf, &odin_flag, sizeof(uint32_t))) 
{ printk(KERN_ERR"Copy to user failed\n"); return -1; } else return sizeof(uint32_t); } static const struct file_operations dmverity_proc_fops = { .read = dmverity_read, }; /** * dmverity_odin_flag_read_init - Initialization function for DMVERITY * * It creates and initializes dmverity proc entry with initialized read handler */ static int __init dmverity_odin_flag_read_init(void) { //extern int boot_mode_recovery; if (/* boot_mode_recovery == */ 1) { /* Only create this in recovery mode. Not sure why I am doing this */ if (proc_create("dmverity_odin_flag", 0644,NULL, &dmverity_proc_fops) == NULL) { printk(KERN_ERR"dmverity_odin_flag_read_init: Error creating proc entry\n"); goto error_return; } printk(KERN_INFO"dmverity_odin_flag_read_init:: Registering /proc/dmverity_odin_flag Interface \n"); } else { printk(KERN_INFO"dmverity_odin_flag_read_init:: not enabling in non-recovery mode\n"); goto error_return; } return 0; error_return: return -1; } /** * dmverity_odin_flag_read_exit - Cleanup Code for DMVERITY * * It removes /proc/dmverity proc entry and does the required cleanup operations */ static void __exit dmverity_odin_flag_read_exit(void) { remove_proc_entry("dmverity_odin_flag", NULL); printk(KERN_INFO"Deregistering /proc/dmverity_odin_flag interface\n"); } module_init(dmverity_odin_flag_read_init); module_exit(dmverity_odin_flag_read_exit); MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
danshuk/u-boot-mini2440
drivers/bios_emulator/x86emu/ops.c
24
143944
/**************************************************************************** * Realmode X86 Emulator Library * * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * Jason Jin <Jason.jin@freescale.com> * * Copyright (C) 1991-2004 SciTech Software, Inc. * Copyright (C) David Mosberger-Tang * Copyright (C) 1999 Egbert Eich * * ======================================================================== * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and that * both that copyright notice and this permission notice appear in * supporting documentation, and that the name of the authors not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. The authors makes no * representations about the suitability of this software for any purpose. * It is provided "as is" without express or implied warranty. * * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. * * ======================================================================== * * Language: ANSI C * Environment: Any * Developer: Kendall Bennett * * Description: This file includes subroutines to implement the decoding * and emulation of all the x86 processor instructions. * * There are approximately 250 subroutines in here, which correspond * to the 256 byte-"opcodes" found on the 8086. The table which * dispatches this is found in the files optab.[ch]. 
* * Each opcode proc has a comment preceeding it which gives it's table * address. Several opcodes are missing (undefined) in the table. * * Each proc includes information for decoding (DECODE_PRINTF and * DECODE_PRINTF2), debugging (TRACE_REGS, SINGLE_STEP), and misc * functions (START_OF_INSTR, END_OF_INSTR). * * Many of the procedures are *VERY* similar in coding. This has * allowed for a very large amount of code to be generated in a fairly * short amount of time (i.e. cut, paste, and modify). The result is * that much of the code below could have been folded into subroutines * for a large reduction in size of this file. The downside would be * that there would be a penalty in execution speed. The file could * also have been *MUCH* larger by inlining certain functions which * were called. This could have resulted even faster execution. The * prime directive I used to decide whether to inline the code or to * modularize it, was basically: 1) no unnecessary subroutine calls, * 2) no routines more than about 200 lines in size, and 3) modularize * any code that I might not get right the first time. The fetch_* * subroutines fall into the latter category. The The decode_* fall * into the second category. The coding of the "switch(mod){ .... }" * in many of the subroutines below falls into the first category. * Especially, the coding of {add,and,or,sub,...}_{byte,word} * subroutines are an especially glaring case of the third guideline. * Since so much of the code is cloned from other modules (compare * opcode #00 to opcode #01), making the basic operations subroutine * calls is especially important; otherwise mistakes in coding an * "add" would represent a nightmare in maintenance. * * Jason ported this file to u-boot. place all the function pointer in * the got2 sector. Removed some opcode. 
* ****************************************************************************/ #include <common.h> #if defined(CONFIG_BIOSEMU) #include "x86emu/x86emui.h" /*----------------------------- Implementation ----------------------------*/ /* constant arrays to do several instructions in just one function */ #ifdef DEBUG static char *x86emu_GenOpName[8] = { "ADD", "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP"}; #endif /* used by several opcodes */ static u8 (*genop_byte_operation[])(u8 d, u8 s) __attribute__ ((section(GOT2_TYPE))) = { add_byte, /* 00 */ or_byte, /* 01 */ adc_byte, /* 02 */ sbb_byte, /* 03 */ and_byte, /* 04 */ sub_byte, /* 05 */ xor_byte, /* 06 */ cmp_byte, /* 07 */ }; static u16 (*genop_word_operation[])(u16 d, u16 s) __attribute__ ((section(GOT2_TYPE))) = { add_word, /*00 */ or_word, /*01 */ adc_word, /*02 */ sbb_word, /*03 */ and_word, /*04 */ sub_word, /*05 */ xor_word, /*06 */ cmp_word, /*07 */ }; static u32 (*genop_long_operation[])(u32 d, u32 s) __attribute__ ((section(GOT2_TYPE))) = { add_long, /*00 */ or_long, /*01 */ adc_long, /*02 */ sbb_long, /*03 */ and_long, /*04 */ sub_long, /*05 */ xor_long, /*06 */ cmp_long, /*07 */ }; /* used by opcodes 80, c0, d0, and d2. */ static u8(*opcD0_byte_operation[])(u8 d, u8 s) __attribute__ ((section(GOT2_TYPE))) = { rol_byte, ror_byte, rcl_byte, rcr_byte, shl_byte, shr_byte, shl_byte, /* sal_byte === shl_byte by definition */ sar_byte, }; /* used by opcodes c1, d1, and d3. */ static u16(*opcD1_word_operation[])(u16 s, u8 d) __attribute__ ((section(GOT2_TYPE))) = { rol_word, ror_word, rcl_word, rcr_word, shl_word, shr_word, shl_word, /* sal_byte === shl_byte by definition */ sar_word, }; /* used by opcodes c1, d1, and d3. 
*/ static u32 (*opcD1_long_operation[])(u32 s, u8 d) __attribute__ ((section(GOT2_TYPE))) = { rol_long, ror_long, rcl_long, rcr_long, shl_long, shr_long, shl_long, /* sal_byte === shl_byte by definition */ sar_long, }; #ifdef DEBUG static char *opF6_names[8] = { "TEST\t", "", "NOT\t", "NEG\t", "MUL\t", "IMUL\t", "DIV\t", "IDIV\t" }; #endif /**************************************************************************** PARAMETERS: op1 - Instruction op code REMARKS: Handles illegal opcodes. ****************************************************************************/ void x86emuOp_illegal_op( u8 op1) { START_OF_INSTR(); if (M.x86.R_SP != 0) { DECODE_PRINTF("ILLEGAL X86 OPCODE\n"); TRACE_REGS(); DB( printk("%04x:%04x: %02X ILLEGAL X86 OPCODE!\n", M.x86.R_CS, M.x86.R_IP-1,op1)); HALT_SYS(); } else { /* If we get here, it means the stack pointer is back to zero * so we are just returning from an emulator service call * so therte is no need to display an error message. We trap * the emulator with an 0xF1 opcode to finish the service * call. 
*/ X86EMU_halt_sys(); } END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38 ****************************************************************************/ void x86emuOp_genop_byte_RM_R(u8 op1) { int mod, rl, rh; uint destoffset; u8 *destreg, *srcreg; u8 destval; op1 = (op1 >> 3) & 0x7; START_OF_INSTR(); DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\t"); FETCH_DECODE_MODRM(mod, rh, rl); if(mod<3) { destoffset = decode_rmXX_address(mod,rl); DECODE_PRINTF(","); destval = fetch_data_byte(destoffset); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = genop_byte_operation[op1](destval, *srcreg); store_data_byte(destoffset, destval); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_byte_operation[op1](*destreg, *srcreg); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x01, 0x09, 0x11, 0x19, 0x21, 0x29, 0x31, 0x39 ****************************************************************************/ void x86emuOp_genop_word_RM_R(u8 op1) { int mod, rl, rh; uint destoffset; op1 = (op1 >> 3) & 0x7; START_OF_INSTR(); DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\t"); FETCH_DECODE_MODRM(mod, rh, rl); if(mod<3) { destoffset = decode_rmXX_address(mod,rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval; u32 *srcreg; DECODE_PRINTF(","); destval = fetch_data_long(destoffset); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = genop_long_operation[op1](destval, *srcreg); store_data_long(destoffset, destval); } else { u16 destval; u16 *srcreg; DECODE_PRINTF(","); destval = fetch_data_word(destoffset); srcreg = DECODE_RM_WORD_REGISTER(rh); 
DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = genop_word_operation[op1](destval, *srcreg); store_data_word(destoffset, destval); } } else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg,*srcreg; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_long_operation[op1](*destreg, *srcreg); } else { u16 *destreg,*srcreg; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_word_operation[op1](*destreg, *srcreg); } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x02, 0x0a, 0x12, 0x1a, 0x22, 0x2a, 0x32, 0x3a ****************************************************************************/ void x86emuOp_genop_byte_R_RM(u8 op1) { int mod, rl, rh; u8 *destreg, *srcreg; uint srcoffset; u8 srcval; op1 = (op1 >> 3) & 0x7; START_OF_INSTR(); DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF(","); srcoffset = decode_rmXX_address(mod,rl); srcval = fetch_data_byte(srcoffset); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rl); srcval = *srcreg; } DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_byte_operation[op1](*destreg, srcval); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x03, 0x0b, 0x13, 0x1b, 0x23, 0x2b, 0x33, 0x3b ****************************************************************************/ void x86emuOp_genop_word_R_RM(u8 op1) { int mod, rl, rh; uint srcoffset; u32 *destreg32, srcval; u16 *destreg; op1 = (op1 >> 3) & 0x7; START_OF_INSTR(); 
DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { srcoffset = decode_rmXX_address(mod,rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { destreg32 = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_long(srcoffset); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg32 = genop_long_operation[op1](*destreg32, srcval); } else { destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_word(srcoffset); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_word_operation[op1](*destreg, srcval); } } else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *srcreg; destreg32 = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg32 = genop_long_operation[op1](*destreg32, *srcreg); } else { u16 *srcreg; destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = genop_word_operation[op1](*destreg, *srcreg); } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x04, 0x0c, 0x14, 0x1c, 0x24, 0x2c, 0x34, 0x3c ****************************************************************************/ void x86emuOp_genop_byte_AL_IMM(u8 op1) { u8 srcval; op1 = (op1 >> 3) & 0x7; START_OF_INSTR(); DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\tAL,"); srcval = fetch_byte_imm(); DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); M.x86.R_AL = genop_byte_operation[op1](M.x86.R_AL, srcval); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcodes 0x05, 0x0d, 0x15, 0x1d, 0x25, 0x2d, 0x35, 0x3d ****************************************************************************/ void x86emuOp_genop_word_AX_IMM(u8 op1) { u32 srcval; op1 = (op1 >> 3) & 
0x7; START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\tEAX,"); srcval = fetch_long_imm(); } else { DECODE_PRINTF(x86emu_GenOpName[op1]); DECODE_PRINTF("\tAX,"); srcval = fetch_word_imm(); } DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { M.x86.R_EAX = genop_long_operation[op1](M.x86.R_EAX, srcval); } else { M.x86.R_AX = genop_word_operation[op1](M.x86.R_AX, (u16)srcval); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x06 ****************************************************************************/ void x86emuOp_push_ES(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("PUSH\tES\n"); TRACE_AND_STEP(); push_word(M.x86.R_ES); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x07 ****************************************************************************/ void x86emuOp_pop_ES(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("POP\tES\n"); TRACE_AND_STEP(); M.x86.R_ES = pop_word(); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x0e ****************************************************************************/ void x86emuOp_push_CS(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("PUSH\tCS\n"); TRACE_AND_STEP(); push_word(M.x86.R_CS); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x0f. 
Escape for two-byte opcode (286 or better)
****************************************************************************/
void x86emuOp_two_byte(u8 X86EMU_UNUSED(op1))
{
    /* Fetch the second opcode byte directly from CS:IP and dispatch
     * through the two-byte opcode table. */
    u8 op2 = (*sys_rdb)(((u32)M.x86.R_CS << 4) + (M.x86.R_IP++));
    INC_DECODED_INST_LEN(1);
    (*x86emu_optab2[op2])(op2);
}

/****************************************************************************
REMARKS:
Handles opcode 0x16
****************************************************************************/
void x86emuOp_push_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("PUSH\tSS\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_SS);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x17
****************************************************************************/
void x86emuOp_pop_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("POP\tSS\n");
    TRACE_AND_STEP();
    M.x86.R_SS = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x1e
****************************************************************************/
void x86emuOp_push_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("PUSH\tDS\n");
    TRACE_AND_STEP();
    push_word(M.x86.R_DS);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x1f
****************************************************************************/
void x86emuOp_pop_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("POP\tDS\n");
    TRACE_AND_STEP();
    M.x86.R_DS = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x26
****************************************************************************/
/* ES: segment-override prefix: sets the override bit for the NEXT
 * instruction, which is why the override state is not cleared here. */
void x86emuOp_segovr_ES(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("ES:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_ES;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x27
****************************************************************************/
void x86emuOp_daa(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DAA\n");
    TRACE_AND_STEP();
    M.x86.R_AL = daa_byte(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x2e
****************************************************************************/
void x86emuOp_segovr_CS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("CS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_CS;
    /* note no DECODE_CLEAR_SEGOVR here. */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x2f
****************************************************************************/
void x86emuOp_das(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DAS\n");
    TRACE_AND_STEP();
    M.x86.R_AL = das_byte(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x36
****************************************************************************/
void x86emuOp_segovr_SS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("SS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_SS;
    /* no DECODE_CLEAR_SEGOVR !
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x37
****************************************************************************/
void x86emuOp_aaa(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("AAA\n");
    TRACE_AND_STEP();
    M.x86.R_AX = aaa_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x3e
****************************************************************************/
void x86emuOp_segovr_DS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_DS;
    /* NO DECODE_CLEAR_SEGOVR! */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x3f
****************************************************************************/
void x86emuOp_aas(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("AAS\n");
    TRACE_AND_STEP();
    M.x86.R_AX = aas_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x40 - 0x47
****************************************************************************/
/* INC reg: register index encoded in the low 3 opcode bits. */
void x86emuOp_inc_register(u8 op1)
{
    START_OF_INSTR();
    op1 &= 0x7;
    DECODE_PRINTF("INC\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;

        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = inc_long(*reg);
    } else {
        u16 *reg;

        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = inc_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x48 - 0x4F
****************************************************************************/
/* DEC reg: register index encoded in the low 3 opcode bits. */
void x86emuOp_dec_register(u8 op1)
{
    START_OF_INSTR();
    op1 &= 0x7;
    DECODE_PRINTF("DEC\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;

        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = dec_long(*reg);
    } else {
        u16 *reg;

        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = dec_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x50 - 0x57
****************************************************************************/
/* PUSH reg: register index encoded in the low 3 opcode bits. */
void x86emuOp_push_register(u8 op1)
{
    START_OF_INSTR();
    op1 &= 0x7;
    DECODE_PRINTF("PUSH\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;

        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        push_long(*reg);
    } else {
        u16 *reg;

        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        push_word(*reg);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x58 - 0x5F
****************************************************************************/
/* POP reg: register index encoded in the low 3 opcode bits. */
void x86emuOp_pop_register(u8 op1)
{
    START_OF_INSTR();
    op1 &= 0x7;
    DECODE_PRINTF("POP\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg;

        reg = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = pop_long();
    } else {
        u16 *reg;

        reg = DECODE_RM_WORD_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *reg = pop_word();
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x60
****************************************************************************/
/* PUSHA/PUSHAD: pushes all general registers; the SP/ESP value pushed
 * is the one sampled before the first push (old_sp). */
void x86emuOp_push_all(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("PUSHAD\n");
    } else {
        DECODE_PRINTF("PUSHA\n");
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 old_sp = M.x86.R_ESP;

        push_long(M.x86.R_EAX);
        push_long(M.x86.R_ECX);
        push_long(M.x86.R_EDX);
        push_long(M.x86.R_EBX);
        push_long(old_sp);
        push_long(M.x86.R_EBP);
        push_long(M.x86.R_ESI);
        push_long(M.x86.R_EDI);
    } else {
        u16 old_sp = M.x86.R_SP;

        push_word(M.x86.R_AX);
        push_word(M.x86.R_CX);
        push_word(M.x86.R_DX);
        push_word(M.x86.R_BX);
        push_word(old_sp);
        push_word(M.x86.R_BP);
        push_word(M.x86.R_SI);
        push_word(M.x86.R_DI);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x61
****************************************************************************/
/* POPA/POPAD: pops in reverse PUSHA order; the stacked SP/ESP slot is
 * discarded by bumping the stack pointer instead of popping into it. */
void x86emuOp_pop_all(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("POPAD\n");
    } else {
        DECODE_PRINTF("POPA\n");
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        M.x86.R_EDI = pop_long();
        M.x86.R_ESI = pop_long();
        M.x86.R_EBP = pop_long();
        M.x86.R_ESP += 4;        /* skip ESP */
        M.x86.R_EBX = pop_long();
        M.x86.R_EDX = pop_long();
        M.x86.R_ECX = pop_long();
        M.x86.R_EAX = pop_long();
    } else {
        M.x86.R_DI = pop_word();
        M.x86.R_SI = pop_word();
        M.x86.R_BP = pop_word();
        M.x86.R_SP += 2;         /* skip SP */
        M.x86.R_BX = pop_word();
        M.x86.R_DX = pop_word();
        M.x86.R_CX = pop_word();
        M.x86.R_AX = pop_word();
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/*opcode 0x62   ILLEGAL OP, calls x86emuOp_illegal_op() */
/*opcode 0x63   ILLEGAL OP, calls x86emuOp_illegal_op() */

/****************************************************************************
REMARKS:
Handles opcode 0x64
****************************************************************************/
void x86emuOp_segovr_FS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("FS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_FS;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x65
****************************************************************************/
void x86emuOp_segovr_GS(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("GS:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_SEGOVR_GS;
    /*
     * note the lack of DECODE_CLEAR_SEGOVR(r) since, here is one of 4
     * opcode subroutines we do not want to do this.
     */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x66 - prefix for 32-bit register
****************************************************************************/
/* Operand-size prefix: flags the NEXT instruction as 32-bit data. */
void x86emuOp_prefix_data(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("DATA:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_PREFIX_DATA;
    /* note no DECODE_CLEAR_SEGOVR here. */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x67 - prefix for 32-bit address
****************************************************************************/
/* Address-size prefix: flags the NEXT instruction as 32-bit addressing. */
void x86emuOp_prefix_addr(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("ADDR:\n");
    TRACE_AND_STEP();
    M.x86.mode |= SYSMODE_PREFIX_ADDR;
    /* note no DECODE_CLEAR_SEGOVR here.
*/ END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x68 ****************************************************************************/ void x86emuOp_push_word_IMM(u8 X86EMU_UNUSED(op1)) { u32 imm; START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { imm = fetch_long_imm(); } else { imm = fetch_word_imm(); } DECODE_PRINTF2("PUSH\t%x\n", imm); TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { push_long(imm); } else { push_word((u16)imm); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x69 ****************************************************************************/ void x86emuOp_imul_word_IMM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint srcoffset; START_OF_INSTR(); DECODE_PRINTF("IMUL\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { srcoffset = decode_rmXX_address(mod, rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; u32 srcval; u32 res_lo,res_hi; s32 imm; destreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_long(srcoffset); imm = fetch_long_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); imul_long_direct(&res_lo,&res_hi,(s32)srcval,(s32)imm); if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u32)res_lo; } else { u16 *destreg; u16 srcval; u32 res; s16 imm; destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_word(srcoffset); imm = fetch_word_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); res = (s16)srcval * (s16)imm; if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u16)res; } } else { 
/* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg,*srcreg; u32 res_lo,res_hi; s32 imm; destreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rl); imm = fetch_long_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); imul_long_direct(&res_lo,&res_hi,(s32)*srcreg,(s32)imm); if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u32)res_lo; } else { u16 *destreg,*srcreg; u32 res; s16 imm; destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rl); imm = fetch_word_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); res = (s16)*srcreg * (s16)imm; if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u16)res; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x6a ****************************************************************************/ void x86emuOp_push_byte_IMM(u8 X86EMU_UNUSED(op1)) { s16 imm; START_OF_INSTR(); imm = (s8)fetch_byte_imm(); DECODE_PRINTF2("PUSH\t%d\n", imm); TRACE_AND_STEP(); push_word(imm); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x6b ****************************************************************************/ void x86emuOp_imul_byte_IMM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint srcoffset; s8 imm; START_OF_INSTR(); DECODE_PRINTF("IMUL\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { srcoffset = decode_rmXX_address(mod, rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; u32 srcval; u32 res_lo,res_hi; destreg = 
DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_long(srcoffset); imm = fetch_byte_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); imul_long_direct(&res_lo,&res_hi,(s32)srcval,(s32)imm); if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u32)res_lo; } else { u16 *destreg; u16 srcval; u32 res; destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcval = fetch_data_word(srcoffset); imm = fetch_byte_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); res = (s16)srcval * (s16)imm; if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u16)res; } } else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg,*srcreg; u32 res_lo,res_hi; destreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rl); imm = fetch_byte_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); imul_long_direct(&res_lo,&res_hi,(s32)*srcreg,(s32)imm); if ((((res_lo & 0x80000000) == 0) && (res_hi == 0x00000000)) || (((res_lo & 0x80000000) != 0) && (res_hi == 0xFFFFFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u32)res_lo; } else { u16 *destreg,*srcreg; u32 res; destreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rl); imm = fetch_byte_imm(); DECODE_PRINTF2(",%d\n", (s32)imm); TRACE_AND_STEP(); res = (s16)*srcreg * (s16)imm; if ((((res & 0x8000) == 0) && ((res >> 16) == 0x0000)) || (((res & 0x8000) != 0) && ((res >> 16) == 0xFFFF))) { CLEAR_FLAG(F_CF); CLEAR_FLAG(F_OF); } else { SET_FLAG(F_CF); SET_FLAG(F_OF); } *destreg = (u16)res; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } 
/****************************************************************************
REMARKS:
Handles opcode 0x6c
****************************************************************************/
/* INSB: string input of one byte from DX port (helper ins()). */
void x86emuOp_ins_byte(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("INSB\n");
    ins(1);
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6d
****************************************************************************/
/* INSW/INSD: element size chosen by the operand-size prefix. */
void x86emuOp_ins_word(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("INSD\n");
        ins(4);
    } else {
        DECODE_PRINTF("INSW\n");
        ins(2);
    }
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6e
****************************************************************************/
/* OUTSB: string output of one byte to DX port (helper outs()). */
void x86emuOp_outs_byte(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("OUTSB\n");
    outs(1);
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x6f
****************************************************************************/
/* OUTSW/OUTSD: element size chosen by the operand-size prefix. */
void x86emuOp_outs_word(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("OUTSD\n");
        outs(4);
    } else {
        DECODE_PRINTF("OUTSW\n");
        outs(2);
    }
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x70 - 0x7F
****************************************************************************/
int x86emu_check_jump_condition(u8 op);

/* Jcc rel8: the condition is encoded in the low nibble of the opcode and
 * evaluated by x86emu_check_jump_condition(); the signed 8-bit
 * displacement is taken relative to the IP following the instruction. */
void x86emuOp_jump_near_cond(u8 op1)
{
    s8 offset;
    u16 target;
    int cond;

    START_OF_INSTR();
    cond = x86emu_check_jump_condition(op1 & 0xF);
    offset = (s8)fetch_byte_imm();
    target = (u16)(M.x86.R_IP + (s16)offset);
    DECODE_PRINTF2("%x\n", target);
    TRACE_AND_STEP();
    if (cond)
        M.x86.R_IP = target;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x80
****************************************************************************/
/* ALU-group <op> r/m8,imm8: the operation index is carried in the reg
 * field ("RH") of the mod/rm byte; CMP (rh == 7) discards the result. */
void x86emuOp_opc80_byte_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 imm;
    u8 destval;

    /*
     * Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".  Doubly nested case would result, except
     * that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF(",");
        destval = fetch_data_byte(destoffset);
        imm = fetch_byte_imm();
        DECODE_PRINTF2("%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (destval, imm);
        if (rh != 7)
            store_data_byte(destoffset, destval);
    } else {
        /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF(",");
        imm = fetch_byte_imm();
        DECODE_PRINTF2("%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (*destreg, imm);
        if (rh != 7)
            *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x81
****************************************************************************/
/* ALU-group <op> r/m16/32,imm16/32: operation index in the reg field;
 * CMP (rh == 7) discards the result. */
void x86emuOp_opc81_word_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    /*
     * Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".  Doubly nested case would result, except
     * that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /*
     * Know operation, decode the mod byte to find the addressing
     * mode.
     */
    if (mod < 3) {
        DECODE_PRINTF("DWORD PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval,imm;

            DECODE_PRINTF(",");
            destval = fetch_data_long(destoffset);
            imm = fetch_long_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (destval, imm);
            if (rh != 7)
                store_data_long(destoffset, destval);
        } else {
            u16 destval,imm;

            DECODE_PRINTF(",");
            destval = fetch_data_word(destoffset);
            imm = fetch_word_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_word_operation[rh]) (destval, imm);
            if (rh != 7)
                store_data_word(destoffset, destval);
        }
    } else {
        /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 destval,imm;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",");
            imm = fetch_long_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (*destreg, imm);
            if (rh != 7)
                *destreg = destval;
        } else {
            u16 *destreg;
            u16 destval,imm;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF(",");
            imm = fetch_word_imm();
            DECODE_PRINTF2("%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_word_operation[rh]) (*destreg, imm);
            if (rh != 7)
                *destreg = destval;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x82
****************************************************************************/
/* Alias of opcode 0x80 (byte ALU-group with imm8). */
void x86emuOp_opc82_byte_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 imm;
    u8 destval;

    /*
     * Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".  Doubly nested case would result, except
     * that the decoded instruction Similar to opcode 81, except that
     * the immediate byte is sign extended to a word length.
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        destval = fetch_data_byte(destoffset);
        imm = fetch_byte_imm();
        DECODE_PRINTF2(",%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (destval, imm);
        if (rh != 7)
            store_data_byte(destoffset, destval);
    } else {
        /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        imm = fetch_byte_imm();
        DECODE_PRINTF2(",%x\n", imm);
        TRACE_AND_STEP();
        destval = (*genop_byte_operation[rh]) (*destreg, imm);
        if (rh != 7)
            *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x83
****************************************************************************/
/* ALU-group <op> r/m16/32,imm8: the byte immediate is sign-extended to
 * the operand width before the operation. */
void x86emuOp_opc83_word_RM_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    /*
     * Weirdo special case instruction format.  Part of the opcode
     * held below in "RH".  Doubly nested case would result, except
     * that the decoded instruction Similar to opcode 81, except that
     * the immediate byte is sign extended to a word length.
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ADD\t");
            break;
        case 1:
            DECODE_PRINTF("OR\t");
            break;
        case 2:
            DECODE_PRINTF("ADC\t");
            break;
        case 3:
            DECODE_PRINTF("SBB\t");
            break;
        case 4:
            DECODE_PRINTF("AND\t");
            break;
        case 5:
            DECODE_PRINTF("SUB\t");
            break;
        case 6:
            DECODE_PRINTF("XOR\t");
            break;
        case 7:
            DECODE_PRINTF("CMP\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        DECODE_PRINTF("DWORD PTR ");
        destoffset = decode_rmXX_address(mod,rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval,imm;

            destval = fetch_data_long(destoffset);
            imm = (s8) fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (destval, imm);
            if (rh != 7)
                store_data_long(destoffset, destval);
        } else {
            u16 destval,imm;

            destval = fetch_data_word(destoffset);
            imm = (s8) fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_word_operation[rh]) (destval, imm);
            if (rh != 7)
                store_data_word(destoffset, destval);
        }
    } else {
        /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 destval,imm;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            imm = (s8) fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_long_operation[rh]) (*destreg, imm);
            if (rh != 7)
                *destreg = destval;
        } else {
            u16 *destreg;
            u16 destval,imm;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            imm = (s8) fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", imm);
            TRACE_AND_STEP();
            destval = (*genop_word_operation[rh]) (*destreg, imm);
            if (rh != 7)
                *destreg = destval;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles
opcode 0x84 ****************************************************************************/ void x86emuOp_test_byte_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; u8 *destreg, *srcreg; uint destoffset; u8 destval; START_OF_INSTR(); DECODE_PRINTF("TEST\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF(","); destval = fetch_data_byte(destoffset); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_byte(destval, *srcreg); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_byte(*destreg, *srcreg); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x85 ****************************************************************************/ void x86emuOp_test_word_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint destoffset; START_OF_INSTR(); DECODE_PRINTF("TEST\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval; u32 *srcreg; DECODE_PRINTF(","); destval = fetch_data_long(destoffset); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_long(destval, *srcreg); } else { u16 destval; u16 *srcreg; DECODE_PRINTF(","); destval = fetch_data_word(destoffset); srcreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_word(destval, *srcreg); } } else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg,*srcreg; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_long(*destreg, *srcreg); } else { u16 *destreg,*srcreg; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rh); 
DECODE_PRINTF("\n"); TRACE_AND_STEP(); test_word(*destreg, *srcreg); } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x86 ****************************************************************************/ void x86emuOp_xchg_byte_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; u8 *destreg, *srcreg; uint destoffset; u8 destval; u8 tmp; START_OF_INSTR(); DECODE_PRINTF("XCHG\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF(","); destval = fetch_data_byte(destoffset); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = destval; destval = tmp; store_data_byte(destoffset, destval); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = *destreg; *destreg = tmp; } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x87 ****************************************************************************/ void x86emuOp_xchg_word_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint destoffset; START_OF_INSTR(); DECODE_PRINTF("XCHG\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF(","); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *srcreg; u32 destval,tmp; destval = fetch_data_long(destoffset); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = destval; destval = tmp; store_data_long(destoffset, destval); } else { u16 *srcreg; u16 destval,tmp; destval = fetch_data_word(destoffset); srcreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = destval; destval = tmp; store_data_word(destoffset, destval); } } 
else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg,*srcreg; u32 tmp; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = *destreg; *destreg = tmp; } else { u16 *destreg,*srcreg; u16 tmp; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = *srcreg; *srcreg = *destreg; *destreg = tmp; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x88 ****************************************************************************/ void x86emuOp_mov_byte_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; u8 *destreg, *srcreg; uint destoffset; START_OF_INSTR(); DECODE_PRINTF("MOV\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); store_data_byte(destoffset, *srcreg); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rl); DECODE_PRINTF(","); srcreg = DECODE_RM_BYTE_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = *srcreg; } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x89 ****************************************************************************/ void x86emuOp_mov_word_RM_R(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint destoffset; START_OF_INSTR(); DECODE_PRINTF("MOV\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *srcreg; DECODE_PRINTF(","); srcreg = DECODE_RM_LONG_REGISTER(rh); DECODE_PRINTF("\n"); TRACE_AND_STEP(); store_data_long(destoffset, *srcreg); } else { u16 *srcreg; DECODE_PRINTF(","); 
srcreg = DECODE_RM_WORD_REGISTER(rh);
            /* continuation of the 0x89 MOV handler started above */
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            store_data_word(destoffset, *srcreg);
        }
    } else {                     /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg,*srcreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = *srcreg;
        } else {
            u16 *destreg,*srcreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = *srcreg;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8a
****************************************************************************/
/* MOV r8,r/m8: load the byte register named by the reg field from the
   register or memory operand named by mod/rm. */
void x86emuOp_mov_byte_R_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg, *srcreg;
    uint srcoffset;
    u8 srcval;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        destreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF(",");
        srcoffset = decode_rmXX_address(mod, rl);
        srcval = fetch_data_byte(srcoffset);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = srcval;
    } else {                     /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rh);
        DECODE_PRINTF(",");
        srcreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = *srcreg;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8b
****************************************************************************/
/* MOV r16,r/m16 (r32,r/m32 with operand-size prefix): word/dword variant
   of 0x8a. */
void x86emuOp_mov_word_R_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint srcoffset;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;
            u32 srcval;

            destreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcoffset = decode_rmXX_address(mod, rl);
            srcval = fetch_data_long(srcoffset);
DECODE_PRINTF("\n");
            /* continuation of the 0x8b MOV handler started above */
            TRACE_AND_STEP();
            *destreg = srcval;
        } else {
            u16 *destreg;
            u16 srcval;

            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcoffset = decode_rmXX_address(mod, rl);
            srcval = fetch_data_word(srcoffset);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = srcval;
        }
    } else {                     /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg, *srcreg;

            destreg = DECODE_RM_LONG_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = *srcreg;
        } else {
            u16 *destreg, *srcreg;

            destreg = DECODE_RM_WORD_REGISTER(rh);
            DECODE_PRINTF(",");
            srcreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = *srcreg;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8c
****************************************************************************/
/* MOV r/m16,Sreg: store a segment register (selected via
   decode_rm_seg_register) into a word register or memory operand. */
void x86emuOp_mov_word_RM_SR(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u16 *destreg, *srcreg;
    uint destoffset;
    u16 destval;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF(",");
        srcreg = decode_rm_seg_register(rh);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        destval = *srcreg;
        store_data_word(destoffset, destval);
    } else {                     /* register to register */
        destreg = DECODE_RM_WORD_REGISTER(rl);
        DECODE_PRINTF(",");
        srcreg = decode_rm_seg_register(rh);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = *srcreg;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8d
****************************************************************************/
/* LEA r16,m: load the effective address (offset only, no segment) of the
   memory operand into a word register. */
void x86emuOp_lea_word_R_M(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u16 *srcreg;
    uint destoffset;

/*
 * TODO: Need to handle address size prefix!
 *
 * lea  eax,[eax+ebx*2] ??
 */
    START_OF_INSTR();
    DECODE_PRINTF("LEA\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        srcreg = DECODE_RM_WORD_REGISTER(rh);
        DECODE_PRINTF(",");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *srcreg = (u16)destoffset;
    }
    /* } else { undefined.  Do nothing. } */
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8e
****************************************************************************/
/* MOV Sreg,r/m16: load a segment register from a word register or memory
   operand. */
void x86emuOp_mov_word_SR_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u16 *destreg, *srcreg;
    uint srcoffset;
    u16 srcval;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        destreg = decode_rm_seg_register(rh);
        DECODE_PRINTF(",");
        srcoffset = decode_rmXX_address(mod, rl);
        srcval = fetch_data_word(srcoffset);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = srcval;
    } else {                     /* register to register */
        destreg = decode_rm_seg_register(rh);
        DECODE_PRINTF(",");
        srcreg = DECODE_RM_WORD_REGISTER(rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *destreg = *srcreg;
    }
    /*
     * Clean up, and reset all the R_xSP pointers to the correct
     * locations.  This is about 3x too much overhead (doing all the
     * segreg ptrs when only one is needed, but this instruction
     * *cannot* be that common, and this isn't too much work anyway.
     */
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x8f
****************************************************************************/
/* POP r/m16 (r/m32 with operand-size prefix): pop the stack top into a
   register or memory operand.  Only reg-field 0 is a legal encoding. */
void x86emuOp_pop_RM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    START_OF_INSTR();
    DECODE_PRINTF("POP\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (rh != 0) {
        DECODE_PRINTF("ILLEGAL DECODE OF OPCODE 8F\n");
        HALT_SYS();
    }
    if (mod < 3) {
        destoffset = decode_rmXX_address(mod, rl);
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval;

            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = pop_long();
            store_data_long(destoffset, destval);
        } else {
            u16 destval;

            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            destval = pop_word();
            store_data_word(destoffset, destval);
        }
    } else {                     /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = pop_long();
        } else {
            u16 *destreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF("\n");
            TRACE_AND_STEP();
            *destreg = pop_word();
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x90
****************************************************************************/
/* NOP: no operation. */
void x86emuOp_nop(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("NOP\n");
    TRACE_AND_STEP();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x91-0x97
****************************************************************************/
/* XCHG (E)AX,reg: swap the accumulator with the register encoded in the
   low three opcode bits.  NOTE(review): the parameter is wrapped in
   X86EMU_UNUSED() yet is read below — presumably the macro only silences
   unused-parameter warnings; confirm against its definition. */
void x86emuOp_xchg_word_AX_register(u8 X86EMU_UNUSED(op1))
{
    u32 tmp;

    op1 &= 0x7;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg32;

        DECODE_PRINTF("XCHG\tEAX,");
        reg32 = DECODE_RM_LONG_REGISTER(op1);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        tmp = M.x86.R_EAX;
        M.x86.R_EAX = *reg32;
        *reg32 = tmp;
    } else {
        u16
*reg16; DECODE_PRINTF("XCHG\tAX,"); reg16 = DECODE_RM_WORD_REGISTER(op1); DECODE_PRINTF("\n"); TRACE_AND_STEP(); tmp = M.x86.R_AX; M.x86.R_EAX = *reg16; *reg16 = (u16)tmp; } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x98 ****************************************************************************/ void x86emuOp_cbw(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("CWDE\n"); } else { DECODE_PRINTF("CBW\n"); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { if (M.x86.R_AX & 0x8000) { M.x86.R_EAX |= 0xffff0000; } else { M.x86.R_EAX &= 0x0000ffff; } } else { if (M.x86.R_AL & 0x80) { M.x86.R_AH = 0xff; } else { M.x86.R_AH = 0x0; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x99 ****************************************************************************/ void x86emuOp_cwd(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("CDQ\n"); } else { DECODE_PRINTF("CWD\n"); } DECODE_PRINTF("CWD\n"); TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { if (M.x86.R_EAX & 0x80000000) { M.x86.R_EDX = 0xffffffff; } else { M.x86.R_EDX = 0x0; } } else { if (M.x86.R_AX & 0x8000) { M.x86.R_DX = 0xffff; } else { M.x86.R_DX = 0x0; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x9a ****************************************************************************/ void x86emuOp_call_far_IMM(u8 X86EMU_UNUSED(op1)) { u16 farseg, faroff; START_OF_INSTR(); DECODE_PRINTF("CALL\t"); faroff = fetch_word_imm(); farseg = fetch_word_imm(); DECODE_PRINTF2("%04x:", farseg); DECODE_PRINTF2("%04x\n", faroff); CALL_TRACE(M.x86.saved_cs, M.x86.saved_ip, farseg, faroff, "FAR "); /* XXX * * Hooked interrupt 
vectors calling into our "BIOS" will cause * problems unless all intersegment stuff is checked for BIOS * access. Check needed here. For moment, let it alone. */ TRACE_AND_STEP(); push_word(M.x86.R_CS); M.x86.R_CS = farseg; push_word(M.x86.R_IP); M.x86.R_IP = faroff; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x9b ****************************************************************************/ void x86emuOp_wait(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("WAIT"); TRACE_AND_STEP(); /* NADA. */ DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x9c ****************************************************************************/ void x86emuOp_pushf_word(u8 X86EMU_UNUSED(op1)) { u32 flags; START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("PUSHFD\n"); } else { DECODE_PRINTF("PUSHF\n"); } TRACE_AND_STEP(); /* clear out *all* bits not representing flags, and turn on real bits */ flags = (M.x86.R_EFLG & F_MSK) | F_ALWAYS_ON; if (M.x86.mode & SYSMODE_PREFIX_DATA) { push_long(flags); } else { push_word((u16)flags); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x9d ****************************************************************************/ void x86emuOp_popf_word(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("POPFD\n"); } else { DECODE_PRINTF("POPF\n"); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { M.x86.R_EFLG = pop_long(); } else { M.x86.R_FLG = pop_word(); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0x9e ****************************************************************************/ void 
x86emuOp_sahf(u8 X86EMU_UNUSED(op1))
/* SAHF: store AH into the low byte of the flag register. */
{
    START_OF_INSTR();
    DECODE_PRINTF("SAHF\n");
    TRACE_AND_STEP();
    /* clear the lower bits of the flag register */
    M.x86.R_FLG &= 0xffffff00;
    /* or in the AH register into the flags register */
    M.x86.R_FLG |= M.x86.R_AH;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0x9f
****************************************************************************/
/* LAHF: load the low byte of the flag register into AH, with bit 1 forced
   on as on real hardware. */
void x86emuOp_lahf(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("LAHF\n");
    TRACE_AND_STEP();
    M.x86.R_AH = (u8)(M.x86.R_FLG & 0xff);
    /*undocumented TC++ behavior??? Nope.  It's documented, but
       you have too look real hard to notice it. */
    M.x86.R_AH |= 0x2;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa0
****************************************************************************/
/* MOV AL,moffs8: load AL from the 16-bit direct offset embedded in the
   instruction (segment applied inside fetch_data_byte). */
void x86emuOp_mov_AL_M_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 offset;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\tAL,");
    offset = fetch_word_imm();
    DECODE_PRINTF2("[%04x]\n", offset);
    TRACE_AND_STEP();
    M.x86.R_AL = fetch_data_byte(offset);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa1
****************************************************************************/
/* MOV (E)AX,moffs16/32: load the accumulator from a direct offset. */
void x86emuOp_mov_AX_M_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 offset;

    START_OF_INSTR();
    offset = fetch_word_imm();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF2("MOV\tEAX,[%04x]\n", offset);
    } else {
        DECODE_PRINTF2("MOV\tAX,[%04x]\n", offset);
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        M.x86.R_EAX = fetch_data_long(offset);
    } else {
        M.x86.R_AX = fetch_data_word(offset);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa2
****************************************************************************/
/* MOV moffs8,AL: store AL at the 16-bit direct offset embedded in the
   instruction. */
void x86emuOp_mov_M_AL_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 offset;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    offset = fetch_word_imm();
    DECODE_PRINTF2("[%04x],AL\n", offset);
    TRACE_AND_STEP();
    store_data_byte(offset, M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa3
****************************************************************************/
/* MOV moffs16/32,(E)AX: store the accumulator at a direct offset. */
void x86emuOp_mov_M_AX_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 offset;

    START_OF_INSTR();
    offset = fetch_word_imm();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF2("MOV\t[%04x],EAX\n", offset);
    } else {
        DECODE_PRINTF2("MOV\t[%04x],AX\n", offset);
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        store_data_long(offset, M.x86.R_EAX);
    } else {
        store_data_word(offset, M.x86.R_AX);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa4
****************************************************************************/
/* MOVS BYTE: copy a byte from [SI] to ES:[DI], honouring the direction
   flag and any REP/REPNE prefix (repeat count taken from CX). */
void x86emuOp_movs_byte(u8 X86EMU_UNUSED(op1))
{
    u8  val;
    u32 count;
    int inc;

    START_OF_INSTR();
    DECODE_PRINTF("MOVS\tBYTE\n");
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -1;
    else
        inc = 1;
    TRACE_AND_STEP();
    count = 1;
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO.
 */
        count = M.x86.R_CX;
        M.x86.R_CX = 0;
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    }
    while (count--) {
        val = fetch_data_byte(M.x86.R_SI);
        store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, val);
        M.x86.R_SI += inc;
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa5
****************************************************************************/
/* MOVS WORD/DWORD: word (or dword, with operand-size prefix) variant of
   0xa4; step size follows the operand width and direction flag. */
void x86emuOp_movs_word(u8 X86EMU_UNUSED(op1))
{
    u32 val;
    int inc;
    u32 count;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("MOVS\tDWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -4;
        else
            inc = 4;
    } else {
        DECODE_PRINTF("MOVS\tWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -2;
        else
            inc = 2;
    }
    TRACE_AND_STEP();
    count = 1;
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO. */
        count = M.x86.R_CX;
        M.x86.R_CX = 0;
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    }
    while (count--) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            val = fetch_data_long(M.x86.R_SI);
            store_data_long_abs(M.x86.R_ES, M.x86.R_DI, val);
        } else {
            val = fetch_data_word(M.x86.R_SI);
            store_data_word_abs(M.x86.R_ES, M.x86.R_DI, (u16)val);
        }
        M.x86.R_SI += inc;
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa6
****************************************************************************/
/* CMPS BYTE: compare [SI] with ES:[DI]; with REPE/REPNE the loop runs
   until CX is exhausted or the ZF condition for the prefix fails. */
void x86emuOp_cmps_byte(u8 X86EMU_UNUSED(op1))
{
    s8  val1, val2;
    int inc;

    START_OF_INSTR();
    DECODE_PRINTF("CMPS\tBYTE\n");
    TRACE_AND_STEP();
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -1;
    else
        inc = 1;

    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* REPE  */
        /* move them until CX is ZERO.
 */
        while (M.x86.R_CX != 0) {
            val1 = fetch_data_byte(M.x86.R_SI);
            val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_byte(val1, val2);
            M.x86.R_CX -= 1;
            M.x86.R_SI += inc;
            M.x86.R_DI += inc;
            /* REPE stops when operands differ, REPNE when they match */
            if ( (M.x86.mode & SYSMODE_PREFIX_REPE) && (ACCESS_FLAG(F_ZF) == 0) ) break;
            if ( (M.x86.mode & SYSMODE_PREFIX_REPNE) && ACCESS_FLAG(F_ZF) ) break;
        }
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    } else {
        val1 = fetch_data_byte(M.x86.R_SI);
        val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI);
        cmp_byte(val1, val2);
        M.x86.R_SI += inc;
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa7
****************************************************************************/
/* CMPS WORD/DWORD: word (or dword, with operand-size prefix) variant of
   0xa6, same REPE/REPNE termination rules. */
void x86emuOp_cmps_word(u8 X86EMU_UNUSED(op1))
{
    u32 val1,val2;
    int inc;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("CMPS\tDWORD\n");
        inc = 4;
    } else {
        DECODE_PRINTF("CMPS\tWORD\n");
        inc = 2;
    }
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -inc;
    TRACE_AND_STEP();
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* REPE  */
        /* move them until CX is ZERO.
 */
        while (M.x86.R_CX != 0) {
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                val1 = fetch_data_long(M.x86.R_SI);
                val2 = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_long(val1, val2);
            } else {
                val1 = fetch_data_word(M.x86.R_SI);
                val2 = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_word((u16)val1, (u16)val2);
            }
            M.x86.R_CX -= 1;
            M.x86.R_SI += inc;
            M.x86.R_DI += inc;
            /* REPE stops when operands differ, REPNE when they match */
            if ( (M.x86.mode & SYSMODE_PREFIX_REPE) && ACCESS_FLAG(F_ZF) == 0 ) break;
            if ( (M.x86.mode & SYSMODE_PREFIX_REPNE) && ACCESS_FLAG(F_ZF) ) break;
        }
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    } else {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            val1 = fetch_data_long(M.x86.R_SI);
            val2 = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_long(val1, val2);
        } else {
            val1 = fetch_data_word(M.x86.R_SI);
            val2 = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_word((u16)val1, (u16)val2);
        }
        M.x86.R_SI += inc;
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa8
****************************************************************************/
/* TEST AL,imm8: AND AL with the immediate, set flags, discard result. */
void x86emuOp_test_AL_IMM(u8 X86EMU_UNUSED(op1))
{
    int imm;

    START_OF_INSTR();
    DECODE_PRINTF("TEST\tAL,");
    imm = fetch_byte_imm();
    DECODE_PRINTF2("%04x\n", imm);
    TRACE_AND_STEP();
    test_byte(M.x86.R_AL, (u8)imm);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xa9
****************************************************************************/
/* TEST (E)AX,imm16/32: word/dword variant of 0xa8. */
void x86emuOp_test_AX_IMM(u8 X86EMU_UNUSED(op1))
{
    u32 srcval;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("TEST\tEAX,");
        srcval = fetch_long_imm();
    } else {
        DECODE_PRINTF("TEST\tAX,");
        srcval = fetch_word_imm();
    }
    DECODE_PRINTF2("%x\n", srcval);
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        test_long(M.x86.R_EAX, srcval);
    } else {
        test_word(M.x86.R_AX, (u16)srcval);
    }
DECODE_CLEAR_SEGOVR();
    /* completes the 0xa9 TEST handler started above */
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xaa
****************************************************************************/
/* STOS BYTE: store AL at ES:[DI], honouring the direction flag and any
   REP prefix (repeats until CX reaches zero). */
void x86emuOp_stos_byte(u8 X86EMU_UNUSED(op1))
{
    int inc;

    START_OF_INSTR();
    DECODE_PRINTF("STOS\tBYTE\n");
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -1;
    else
        inc = 1;
    TRACE_AND_STEP();
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO. */
        while (M.x86.R_CX != 0) {
            store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AL);
            M.x86.R_CX -= 1;
            M.x86.R_DI += inc;
        }
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    } else {
        store_data_byte_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AL);
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xab
****************************************************************************/
/* STOS WORD/DWORD: store AX (or EAX with operand-size prefix) at ES:[DI],
   with optional REP repetition. */
void x86emuOp_stos_word(u8 X86EMU_UNUSED(op1))
{
    int inc;
    u32 count;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("STOS\tDWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -4;
        else
            inc = 4;
    } else {
        DECODE_PRINTF("STOS\tWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -2;
        else
            inc = 2;
    }
    TRACE_AND_STEP();
    count = 1;
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO.
 */
        count = M.x86.R_CX;
        M.x86.R_CX = 0;
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    }
    while (count--) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            store_data_long_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_EAX);
        } else {
            store_data_word_abs(M.x86.R_ES, M.x86.R_DI, M.x86.R_AX);
        }
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xac
****************************************************************************/
/* LODS BYTE: load AL from [SI]; a REP prefix repeats (leaving only the
   last value in AL, as on real hardware). */
void x86emuOp_lods_byte(u8 X86EMU_UNUSED(op1))
{
    int inc;

    START_OF_INSTR();
    DECODE_PRINTF("LODS\tBYTE\n");
    TRACE_AND_STEP();
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -1;
    else
        inc = 1;
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO. */
        while (M.x86.R_CX != 0) {
            M.x86.R_AL = fetch_data_byte(M.x86.R_SI);
            M.x86.R_CX -= 1;
            M.x86.R_SI += inc;
        }
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    } else {
        M.x86.R_AL = fetch_data_byte(M.x86.R_SI);
        M.x86.R_SI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xad
****************************************************************************/
/* LODS WORD/DWORD: load AX (or EAX with operand-size prefix) from [SI]. */
void x86emuOp_lods_word(u8 X86EMU_UNUSED(op1))
{
    int inc;
    u32 count;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("LODS\tDWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -4;
        else
            inc = 4;
    } else {
        DECODE_PRINTF("LODS\tWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -2;
        else
            inc = 2;
    }
    TRACE_AND_STEP();
    count = 1;
    if (M.x86.mode & (SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE)) {
        /* dont care whether REPE or REPNE */
        /* move them until CX is ZERO.
 */
        count = M.x86.R_CX;
        M.x86.R_CX = 0;
        M.x86.mode &= ~(SYSMODE_PREFIX_REPE | SYSMODE_PREFIX_REPNE);
    }
    while (count--) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            M.x86.R_EAX = fetch_data_long(M.x86.R_SI);
        } else {
            M.x86.R_AX = fetch_data_word(M.x86.R_SI);
        }
        M.x86.R_SI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xae
****************************************************************************/
/* SCAS BYTE: compare AL against ES:[DI].  REPE scans while equal,
   REPNE scans while not equal, both bounded by CX. */
void x86emuOp_scas_byte(u8 X86EMU_UNUSED(op1))
{
    s8  val2;
    int inc;

    START_OF_INSTR();
    DECODE_PRINTF("SCAS\tBYTE\n");
    TRACE_AND_STEP();
    if (ACCESS_FLAG(F_DF))   /* down */
        inc = -1;
    else
        inc = 1;
    if (M.x86.mode & SYSMODE_PREFIX_REPE) {
        /* REPE  */
        /* move them until CX is ZERO. */
        while (M.x86.R_CX != 0) {
            val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_byte(M.x86.R_AL, val2);
            M.x86.R_CX -= 1;
            M.x86.R_DI += inc;
            if (ACCESS_FLAG(F_ZF) == 0)
                break;
        }
        M.x86.mode &= ~SYSMODE_PREFIX_REPE;
    } else if (M.x86.mode & SYSMODE_PREFIX_REPNE) {
        /* REPNE  */
        /* move them until CX is ZERO.
 */
        while (M.x86.R_CX != 0) {
            val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_byte(M.x86.R_AL, val2);
            M.x86.R_CX -= 1;
            M.x86.R_DI += inc;
            if (ACCESS_FLAG(F_ZF))
                break;           /* zero flag set means equal */
        }
        M.x86.mode &= ~SYSMODE_PREFIX_REPNE;
    } else {
        val2 = fetch_data_byte_abs(M.x86.R_ES, M.x86.R_DI);
        cmp_byte(M.x86.R_AL, val2);
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xaf
****************************************************************************/
/* SCAS WORD/DWORD: compare AX (or EAX with operand-size prefix) against
   ES:[DI], with the same REPE/REPNE scanning rules as 0xae. */
void x86emuOp_scas_word(u8 X86EMU_UNUSED(op1))
{
    int inc;
    u32 val;

    START_OF_INSTR();
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        DECODE_PRINTF("SCAS\tDWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -4;
        else
            inc = 4;
    } else {
        DECODE_PRINTF("SCAS\tWORD\n");
        if (ACCESS_FLAG(F_DF))   /* down */
            inc = -2;
        else
            inc = 2;
    }
    TRACE_AND_STEP();
    if (M.x86.mode & SYSMODE_PREFIX_REPE) {
        /* REPE  */
        /* move them until CX is ZERO. */
        while (M.x86.R_CX != 0) {
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_long(M.x86.R_EAX, val);
            } else {
                val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_word(M.x86.R_AX, (u16)val);
            }
            M.x86.R_CX -= 1;
            M.x86.R_DI += inc;
            if (ACCESS_FLAG(F_ZF) == 0)
                break;
        }
        M.x86.mode &= ~SYSMODE_PREFIX_REPE;
    } else if (M.x86.mode & SYSMODE_PREFIX_REPNE) {
        /* REPNE  */
        /* move them until CX is ZERO.
 */
        while (M.x86.R_CX != 0) {
            if (M.x86.mode & SYSMODE_PREFIX_DATA) {
                val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_long(M.x86.R_EAX, val);
            } else {
                val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI);
                cmp_word(M.x86.R_AX, (u16)val);
            }
            M.x86.R_CX -= 1;
            M.x86.R_DI += inc;
            if (ACCESS_FLAG(F_ZF))
                break;           /* zero flag set means equal */
        }
        M.x86.mode &= ~SYSMODE_PREFIX_REPNE;
    } else {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            val = fetch_data_long_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_long(M.x86.R_EAX, val);
        } else {
            val = fetch_data_word_abs(M.x86.R_ES, M.x86.R_DI);
            cmp_word(M.x86.R_AX, (u16)val);
        }
        M.x86.R_DI += inc;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xb0 - 0xb7
****************************************************************************/
/* MOV r8,imm8: the destination byte register is encoded in the low three
   opcode bits. */
void x86emuOp_mov_byte_register_IMM(u8 op1)
{
    u8 imm, *ptr;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    ptr = DECODE_RM_BYTE_REGISTER(op1 & 0x7);
    DECODE_PRINTF(",");
    imm = fetch_byte_imm();
    DECODE_PRINTF2("%x\n", imm);
    TRACE_AND_STEP();
    *ptr = imm;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xb8 - 0xbf
****************************************************************************/
/* MOV r16/r32,imm: word/dword variant of 0xb0-0xb7.
   NOTE(review): the parameter is wrapped in X86EMU_UNUSED() yet is masked
   and used below (unlike the 0xb0 handler's plain `u8 op1`) — presumably
   the macro only suppresses warnings; confirm against its definition. */
void x86emuOp_mov_word_register_IMM(u8 X86EMU_UNUSED(op1))
{
    u32 srcval;

    op1 &= 0x7;

    START_OF_INSTR();
    DECODE_PRINTF("MOV\t");
    if (M.x86.mode & SYSMODE_PREFIX_DATA) {
        u32 *reg32;

        reg32 = DECODE_RM_LONG_REGISTER(op1);
        srcval = fetch_long_imm();
        DECODE_PRINTF2(",%x\n", srcval);
        TRACE_AND_STEP();
        *reg32 = srcval;
    } else {
        u16 *reg16;

        reg16 = DECODE_RM_WORD_REGISTER(op1);
        srcval = fetch_word_imm();
        DECODE_PRINTF2(",%x\n", srcval);
        TRACE_AND_STEP();
        *reg16 = (u16)srcval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xc0
****************************************************************************/
/* Group 2 shift/rotate r/m8,imm8: the operation (ROL/ROR/RCL/RCR/SHL/SHR/
   SAL/SAR) is selected by the reg field and dispatched through the
   opcD0_byte_operation table. */
void x86emuOp_opcC0_byte_RM_MEM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 destval;
    u8 amt;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".  Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        amt = fetch_byte_imm();
        DECODE_PRINTF2(",%x\n", amt);
        destval = fetch_data_byte(destoffset);
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (destval, amt);
        store_data_byte(destoffset, destval);
    } else {                     /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        amt = fetch_byte_imm();
        DECODE_PRINTF2(",%x\n", amt);
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (*destreg, amt);
        *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xc1
****************************************************************************/
/* Group 2 shift/rotate r/m16,imm8 (r/m32 with operand-size prefix),
   dispatched through opcD1_word_operation / opcD1_long_operation. */
void x86emuOp_opcC1_word_RM_MEM(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;
    u8 amt;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".
Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval;

            DECODE_PRINTF("DWORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            amt = fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", amt);
            destval = fetch_data_long(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_long_operation[rh]) (destval, amt);
            store_data_long(destoffset, destval);
        } else {
            u16 destval;

            DECODE_PRINTF("WORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            amt = fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", amt);
            destval = fetch_data_word(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_word_operation[rh]) (destval, amt);
            store_data_word(destoffset, destval);
        }
    } else {                     /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            amt = fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", amt);
            TRACE_AND_STEP();
            *destreg = (*opcD1_long_operation[rh]) (*destreg, amt);
        } else {
            u16 *destreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            amt = fetch_byte_imm();
            DECODE_PRINTF2(",%x\n", amt);
            TRACE_AND_STEP();
            *destreg = (*opcD1_word_operation[rh]) (*destreg, amt);
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xc2
****************************************************************************/
/* RET imm16: near return, then release imm16 bytes of stack arguments. */
void x86emuOp_ret_near_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 imm;

    START_OF_INSTR();
    DECODE_PRINTF("RET\t");
    imm = fetch_word_imm();
    DECODE_PRINTF2("%x\n", imm);
    RETURN_TRACE("RET",M.x86.saved_cs,M.x86.saved_ip);
    TRACE_AND_STEP();
    M.x86.R_IP = pop_word();
    M.x86.R_SP += imm;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xc3
****************************************************************************/
/* RET: near return — pop IP. */
void x86emuOp_ret_near(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("RET\n");
    RETURN_TRACE("RET",M.x86.saved_cs,M.x86.saved_ip);
    TRACE_AND_STEP();
    M.x86.R_IP = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xc4
****************************************************************************/
/* LES r16,m16:16: load the offset word into the destination register and
   the following word into ES. */
void x86emuOp_les_R_IMM(u8 X86EMU_UNUSED(op1))
{
    int mod, rh, rl;
    u16 *dstreg;
    uint srcoffset;

    START_OF_INSTR();
    DECODE_PRINTF("LES\t");
    FETCH_DECODE_MODRM(mod, rh, rl);
    if (mod < 3) {
        dstreg = DECODE_RM_WORD_REGISTER(rh);
        DECODE_PRINTF(",");
        srcoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF("\n");
        TRACE_AND_STEP();
        *dstreg = fetch_data_word(srcoffset);
        M.x86.R_ES = fetch_data_word(srcoffset + 2);
    }
    /* else UNDEFINED!
register to register */ DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xc5 ****************************************************************************/ void x86emuOp_lds_R_IMM(u8 X86EMU_UNUSED(op1)) { int mod, rh, rl; u16 *dstreg; uint srcoffset; START_OF_INSTR(); DECODE_PRINTF("LDS\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (mod < 3) { dstreg = DECODE_RM_WORD_REGISTER(rh); DECODE_PRINTF(","); srcoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *dstreg = fetch_data_word(srcoffset); M.x86.R_DS = fetch_data_word(srcoffset + 2); } /* else UNDEFINED! */ DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xc6 ****************************************************************************/ void x86emuOp_mov_byte_RM_IMM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; u8 *destreg; uint destoffset; u8 imm; START_OF_INSTR(); DECODE_PRINTF("MOV\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (rh != 0) { DECODE_PRINTF("ILLEGAL DECODE OF OPCODE c6\n"); HALT_SYS(); } if (mod < 3) { DECODE_PRINTF("BYTE PTR "); destoffset = decode_rmXX_address(mod, rl); imm = fetch_byte_imm(); DECODE_PRINTF2(",%2x\n", imm); TRACE_AND_STEP(); store_data_byte(destoffset, imm); } else { /* register to register */ destreg = DECODE_RM_BYTE_REGISTER(rl); imm = fetch_byte_imm(); DECODE_PRINTF2(",%2x\n", imm); TRACE_AND_STEP(); *destreg = imm; } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xc7 ****************************************************************************/ void x86emuOp_mov_word_RM_IMM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint destoffset; START_OF_INSTR(); DECODE_PRINTF("MOV\t"); FETCH_DECODE_MODRM(mod, rh, rl); if (rh != 0) { DECODE_PRINTF("ILLEGAL DECODE OF OPCODE 8F\n"); HALT_SYS(); } if 
(mod < 3) { if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 imm; DECODE_PRINTF("DWORD PTR "); destoffset = decode_rmXX_address(mod, rl); imm = fetch_long_imm(); DECODE_PRINTF2(",%x\n", imm); TRACE_AND_STEP(); store_data_long(destoffset, imm); } else { u16 imm; DECODE_PRINTF("WORD PTR "); destoffset = decode_rmXX_address(mod, rl); imm = fetch_word_imm(); DECODE_PRINTF2(",%x\n", imm); TRACE_AND_STEP(); store_data_word(destoffset, imm); } } else { /* register to register */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; u32 imm; destreg = DECODE_RM_LONG_REGISTER(rl); imm = fetch_long_imm(); DECODE_PRINTF2(",%x\n", imm); TRACE_AND_STEP(); *destreg = imm; } else { u16 *destreg; u16 imm; destreg = DECODE_RM_WORD_REGISTER(rl); imm = fetch_word_imm(); DECODE_PRINTF2(",%x\n", imm); TRACE_AND_STEP(); *destreg = imm; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xc8 ****************************************************************************/ void x86emuOp_enter(u8 X86EMU_UNUSED(op1)) { u16 local,frame_pointer; u8 nesting; int i; START_OF_INSTR(); local = fetch_word_imm(); nesting = fetch_byte_imm(); DECODE_PRINTF2("ENTER %x\n", local); DECODE_PRINTF2(",%x\n", nesting); TRACE_AND_STEP(); push_word(M.x86.R_BP); frame_pointer = M.x86.R_SP; if (nesting > 0) { for (i = 1; i < nesting; i++) { M.x86.R_BP -= 2; push_word(fetch_data_word_abs(M.x86.R_SS, M.x86.R_BP)); } push_word(frame_pointer); } M.x86.R_BP = frame_pointer; M.x86.R_SP = (u16)(M.x86.R_SP - local); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xc9 ****************************************************************************/ void x86emuOp_leave(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("LEAVE\n"); TRACE_AND_STEP(); M.x86.R_SP = M.x86.R_BP; M.x86.R_BP = pop_word(); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } 
/****************************************************************************
REMARKS:
Handles opcode 0xca
****************************************************************************/
void x86emuOp_ret_far_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 imm;

    START_OF_INSTR();
    DECODE_PRINTF("RETF\t");
    imm = fetch_word_imm();
    DECODE_PRINTF2("%x\n", imm);
    RETURN_TRACE("RETF",M.x86.saved_cs,M.x86.saved_ip);
    TRACE_AND_STEP();
    /* Far return: pop IP then CS, then drop imm bytes of arguments. */
    M.x86.R_IP = pop_word();
    M.x86.R_CS = pop_word();
    M.x86.R_SP += imm;
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xcb
****************************************************************************/
void x86emuOp_ret_far(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("RETF\n");
    RETURN_TRACE("RETF",M.x86.saved_cs,M.x86.saved_ip);
    TRACE_AND_STEP();
    /* Pop IP then CS, in that stack order. */
    M.x86.R_IP = pop_word();
    M.x86.R_CS = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xcc
****************************************************************************/
void x86emuOp_int3(u8 X86EMU_UNUSED(op1))
{
    u16 tmp;

    START_OF_INSTR();
    DECODE_PRINTF("INT 3\n");
    /* access the segment register */
    /* NOTE(review): tmp is read but never used afterwards — presumably
       only to touch vector 3's IVT entry; confirm before removing. */
    tmp = (u16) mem_access_word(3 * 4 + 2);
    TRACE_AND_STEP();
    if (_X86EMU_intrTab[3]) {
        /* A host-side handler is installed for this vector; call it. */
        (*_X86EMU_intrTab[3])(3);
    } else {
        /* Emulate the real-mode interrupt sequence: push FLAGS, clear
           IF/TF, push CS:IP, then load CS:IP from IVT slot 3. */
        push_word((u16)M.x86.R_FLG);
        CLEAR_FLAG(F_IF);
        CLEAR_FLAG(F_TF);
        push_word(M.x86.R_CS);
        M.x86.R_CS = mem_access_word(3 * 4 + 2);
        push_word(M.x86.R_IP);
        M.x86.R_IP = mem_access_word(3 * 4);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xcd
****************************************************************************/
void x86emuOp_int_IMM(u8 X86EMU_UNUSED(op1))
{
    u16 tmp;
    u8 intnum;

    START_OF_INSTR();
    DECODE_PRINTF("INT\t");
    intnum = fetch_byte_imm();
    DECODE_PRINTF2("%x\n", intnum);
    /* NOTE(review): tmp is read but never used afterwards — see int3. */
    tmp = mem_access_word(intnum * 4 + 2);
    TRACE_AND_STEP();
    if (_X86EMU_intrTab[intnum]) {
        (*_X86EMU_intrTab[intnum])(intnum);
    } else {
        /* Real-mode interrupt sequence via IVT slot intnum. */
        push_word((u16)M.x86.R_FLG);
        CLEAR_FLAG(F_IF);
        CLEAR_FLAG(F_TF);
        push_word(M.x86.R_CS);
        M.x86.R_CS = mem_access_word(intnum * 4 + 2);
        push_word(M.x86.R_IP);
        M.x86.R_IP = mem_access_word(intnum * 4);
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xce
****************************************************************************/
void x86emuOp_into(u8 X86EMU_UNUSED(op1))
{
    u16 tmp;

    START_OF_INSTR();
    DECODE_PRINTF("INTO\n");
    TRACE_AND_STEP();
    /* INT 4, taken only when the overflow flag is set. */
    if (ACCESS_FLAG(F_OF)) {
        tmp = mem_access_word(4 * 4 + 2);
        if (_X86EMU_intrTab[4]) {
            (*_X86EMU_intrTab[4])(4);
        } else {
            push_word((u16)M.x86.R_FLG);
            CLEAR_FLAG(F_IF);
            CLEAR_FLAG(F_TF);
            push_word(M.x86.R_CS);
            M.x86.R_CS = mem_access_word(4 * 4 + 2);
            push_word(M.x86.R_IP);
            M.x86.R_IP = mem_access_word(4 * 4);
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xcf
****************************************************************************/
void x86emuOp_iret(u8 X86EMU_UNUSED(op1))
{
    START_OF_INSTR();
    DECODE_PRINTF("IRET\n");
    TRACE_AND_STEP();
    /* Reverse of the interrupt entry sequence: IP, CS, FLAGS. */
    M.x86.R_IP = pop_word();
    M.x86.R_CS = pop_word();
    M.x86.R_FLG = pop_word();
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd0
****************************************************************************/
void x86emuOp_opcD0_byte_RM_1(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 destval;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".  Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        /* rh selects the shift/rotate operation within the group. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        /* Memory operand, shifted by an implicit count of 1. */
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF(",1\n");
        destval = fetch_data_byte(destoffset);
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (destval, 1);
        store_data_byte(destoffset, destval);
    } else {
        /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF(",1\n");
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (*destreg, 1);
        *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd1
****************************************************************************/
void x86emuOp_opcD1_word_RM_1(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".  Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    if (mod < 3) {
        /* Memory operand; operand-size prefix selects 32-bit form. */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval;

            DECODE_PRINTF("DWORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            DECODE_PRINTF(",1\n");
            destval = fetch_data_long(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_long_operation[rh]) (destval, 1);
            store_data_long(destoffset, destval);
        } else {
            u16 destval;

            DECODE_PRINTF("WORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            DECODE_PRINTF(",1\n");
            destval = fetch_data_word(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_word_operation[rh]) (destval, 1);
            store_data_word(destoffset, destval);
        }
    } else {
        /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval;
            u32 *destreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",1\n");
            TRACE_AND_STEP();
            destval = (*opcD1_long_operation[rh]) (*destreg, 1);
            *destreg = destval;
        } else {
            u16 destval;
            u16 *destreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF(",1\n");
            TRACE_AND_STEP();
            destval = (*opcD1_word_operation[rh]) (*destreg, 1);
            *destreg = destval;
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd2
****************************************************************************/
void x86emuOp_opcD2_byte_RM_CL(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    u8 *destreg;
    uint destoffset;
    u8 destval;
    u8 amt;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".  Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    /* Shift count comes from CL for the D2 group. */
    amt = M.x86.R_CL;
    if (mod < 3) {
        DECODE_PRINTF("BYTE PTR ");
        destoffset = decode_rmXX_address(mod, rl);
        DECODE_PRINTF(",CL\n");
        destval = fetch_data_byte(destoffset);
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (destval, amt);
        store_data_byte(destoffset, destval);
    } else {
        /* register to register */
        destreg = DECODE_RM_BYTE_REGISTER(rl);
        DECODE_PRINTF(",CL\n");
        TRACE_AND_STEP();
        destval = (*opcD0_byte_operation[rh]) (*destreg, amt);
        *destreg = destval;
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd3
****************************************************************************/
void x86emuOp_opcD3_word_RM_CL(u8 X86EMU_UNUSED(op1))
{
    int mod, rl, rh;
    uint destoffset;
    u8 amt;

    /*
     * Yet another weirdo special case instruction format.  Part of
     * the opcode held below in "RH".  Doubly nested case would
     * result, except that the decoded instruction
     */
    START_OF_INSTR();
    FETCH_DECODE_MODRM(mod, rh, rl);
#ifdef DEBUG
    if (DEBUG_DECODE()) {
        /* XXX DECODE_PRINTF may be changed to something more
           general, so that it is important to leave the strings
           in the same format, even though the result is that the
           above test is done twice. */
        switch (rh) {
        case 0:
            DECODE_PRINTF("ROL\t");
            break;
        case 1:
            DECODE_PRINTF("ROR\t");
            break;
        case 2:
            DECODE_PRINTF("RCL\t");
            break;
        case 3:
            DECODE_PRINTF("RCR\t");
            break;
        case 4:
            DECODE_PRINTF("SHL\t");
            break;
        case 5:
            DECODE_PRINTF("SHR\t");
            break;
        case 6:
            DECODE_PRINTF("SAL\t");
            break;
        case 7:
            DECODE_PRINTF("SAR\t");
            break;
        }
    }
#endif
    /* know operation, decode the mod byte to find the addressing
       mode. */
    /* Shift count comes from CL for the D3 group. */
    amt = M.x86.R_CL;
    if (mod < 3) {
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 destval;

            DECODE_PRINTF("DWORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            DECODE_PRINTF(",CL\n");
            destval = fetch_data_long(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_long_operation[rh]) (destval, amt);
            store_data_long(destoffset, destval);
        } else {
            u16 destval;

            DECODE_PRINTF("WORD PTR ");
            destoffset = decode_rmXX_address(mod, rl);
            DECODE_PRINTF(",CL\n");
            destval = fetch_data_word(destoffset);
            TRACE_AND_STEP();
            destval = (*opcD1_word_operation[rh]) (destval, amt);
            store_data_word(destoffset, destval);
        }
    } else {
        /* register to register */
        if (M.x86.mode & SYSMODE_PREFIX_DATA) {
            u32 *destreg;

            destreg = DECODE_RM_LONG_REGISTER(rl);
            DECODE_PRINTF(",CL\n");
            TRACE_AND_STEP();
            *destreg = (*opcD1_long_operation[rh]) (*destreg, amt);
        } else {
            u16 *destreg;

            destreg = DECODE_RM_WORD_REGISTER(rl);
            DECODE_PRINTF(",CL\n");
            TRACE_AND_STEP();
            *destreg = (*opcD1_word_operation[rh]) (*destreg, amt);
        }
    }
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd4
****************************************************************************/
void x86emuOp_aam(u8 X86EMU_UNUSED(op1))
{
    u8 a;

    START_OF_INSTR();
    DECODE_PRINTF("AAM\n");
    a = fetch_byte_imm();       /* this is a stupid encoding. */
    if (a != 10) {
        /* Only the documented base-10 encoding is supported. */
        DECODE_PRINTF("ERROR DECODING AAM\n");
        TRACE_REGS();
        HALT_SYS();
    }
    TRACE_AND_STEP();
    /* note the type change here --- returning AL and AH in AX. */
    M.x86.R_AX = aam_word(M.x86.R_AL);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/****************************************************************************
REMARKS:
Handles opcode 0xd5
****************************************************************************/
void x86emuOp_aad(u8 X86EMU_UNUSED(op1))
{
    u8 a;

    START_OF_INSTR();
    DECODE_PRINTF("AAD\n");
    /* NOTE(review): the immediate base is fetched but ignored, so
       aad_word presumably assumes base 10; undocumented non-10 bases
       are not emulated faithfully — confirm against aad_word. */
    a = fetch_byte_imm();
    TRACE_AND_STEP();
    M.x86.R_AX = aad_word(M.x86.R_AX);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/* opcode 0xd6 ILLEGAL OPCODE */

/****************************************************************************
REMARKS:
Handles opcode 0xd7
****************************************************************************/
void x86emuOp_xlat(u8 X86EMU_UNUSED(op1))
{
    u16 addr;

    START_OF_INSTR();
    DECODE_PRINTF("XLAT\n");
    TRACE_AND_STEP();
    /* AL becomes the byte at [BX + unsigned AL]. */
    addr = (u16)(M.x86.R_BX + (u8)M.x86.R_AL);
    M.x86.R_AL = fetch_data_byte(addr);
    DECODE_CLEAR_SEGOVR();
    END_OF_INSTR();
}

/* instructions D8 ..
DF are in i87_ops.c */ /**************************************************************************** REMARKS: Handles opcode 0xe0 ****************************************************************************/ void x86emuOp_loopne(u8 X86EMU_UNUSED(op1)) { s16 ip; START_OF_INSTR(); DECODE_PRINTF("LOOPNE\t"); ip = (s8) fetch_byte_imm(); ip += (s16) M.x86.R_IP; DECODE_PRINTF2("%04x\n", ip); TRACE_AND_STEP(); M.x86.R_CX -= 1; if (M.x86.R_CX != 0 && !ACCESS_FLAG(F_ZF)) /* CX != 0 and !ZF */ M.x86.R_IP = ip; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe1 ****************************************************************************/ void x86emuOp_loope(u8 X86EMU_UNUSED(op1)) { s16 ip; START_OF_INSTR(); DECODE_PRINTF("LOOPE\t"); ip = (s8) fetch_byte_imm(); ip += (s16) M.x86.R_IP; DECODE_PRINTF2("%04x\n", ip); TRACE_AND_STEP(); M.x86.R_CX -= 1; if (M.x86.R_CX != 0 && ACCESS_FLAG(F_ZF)) /* CX != 0 and ZF */ M.x86.R_IP = ip; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe2 ****************************************************************************/ void x86emuOp_loop(u8 X86EMU_UNUSED(op1)) { s16 ip; START_OF_INSTR(); DECODE_PRINTF("LOOP\t"); ip = (s8) fetch_byte_imm(); ip += (s16) M.x86.R_IP; DECODE_PRINTF2("%04x\n", ip); TRACE_AND_STEP(); M.x86.R_CX -= 1; if (M.x86.R_CX != 0) M.x86.R_IP = ip; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe3 ****************************************************************************/ void x86emuOp_jcxz(u8 X86EMU_UNUSED(op1)) { u16 target; s8 offset; /* jump to byte offset if overflow flag is set */ START_OF_INSTR(); DECODE_PRINTF("JCXZ\t"); offset = (s8)fetch_byte_imm(); target = (u16)(M.x86.R_IP + offset); DECODE_PRINTF2("%x\n", target); 
TRACE_AND_STEP(); if (M.x86.R_CX == 0) M.x86.R_IP = target; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe4 ****************************************************************************/ void x86emuOp_in_byte_AL_IMM(u8 X86EMU_UNUSED(op1)) { u8 port; START_OF_INSTR(); DECODE_PRINTF("IN\t"); port = (u8) fetch_byte_imm(); DECODE_PRINTF2("%x,AL\n", port); TRACE_AND_STEP(); M.x86.R_AL = (*sys_inb)(port); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe5 ****************************************************************************/ void x86emuOp_in_word_AX_IMM(u8 X86EMU_UNUSED(op1)) { u8 port; START_OF_INSTR(); DECODE_PRINTF("IN\t"); port = (u8) fetch_byte_imm(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF2("EAX,%x\n", port); } else { DECODE_PRINTF2("AX,%x\n", port); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { M.x86.R_EAX = (*sys_inl)(port); } else { M.x86.R_AX = (*sys_inw)(port); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe6 ****************************************************************************/ void x86emuOp_out_byte_IMM_AL(u8 X86EMU_UNUSED(op1)) { u8 port; START_OF_INSTR(); DECODE_PRINTF("OUT\t"); port = (u8) fetch_byte_imm(); DECODE_PRINTF2("%x,AL\n", port); TRACE_AND_STEP(); (*sys_outb)(port, M.x86.R_AL); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe7 ****************************************************************************/ void x86emuOp_out_word_IMM_AX(u8 X86EMU_UNUSED(op1)) { u8 port; START_OF_INSTR(); DECODE_PRINTF("OUT\t"); port = (u8) fetch_byte_imm(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF2("%x,EAX\n", port); } else 
{ DECODE_PRINTF2("%x,AX\n", port); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { (*sys_outl)(port, M.x86.R_EAX); } else { (*sys_outw)(port, M.x86.R_AX); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe8 ****************************************************************************/ void x86emuOp_call_near_IMM(u8 X86EMU_UNUSED(op1)) { s16 ip; START_OF_INSTR(); DECODE_PRINTF("CALL\t"); ip = (s16) fetch_word_imm(); ip += (s16) M.x86.R_IP; /* CHECK SIGN */ DECODE_PRINTF2("%04x\n", ip); CALL_TRACE(M.x86.saved_cs, M.x86.saved_ip, M.x86.R_CS, ip, ""); TRACE_AND_STEP(); push_word(M.x86.R_IP); M.x86.R_IP = ip; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xe9 ****************************************************************************/ void x86emuOp_jump_near_IMM(u8 X86EMU_UNUSED(op1)) { int ip; START_OF_INSTR(); DECODE_PRINTF("JMP\t"); ip = (s16)fetch_word_imm(); ip += (s16)M.x86.R_IP; DECODE_PRINTF2("%04x\n", ip); TRACE_AND_STEP(); M.x86.R_IP = (u16)ip; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xea ****************************************************************************/ void x86emuOp_jump_far_IMM(u8 X86EMU_UNUSED(op1)) { u16 cs, ip; START_OF_INSTR(); DECODE_PRINTF("JMP\tFAR "); ip = fetch_word_imm(); cs = fetch_word_imm(); DECODE_PRINTF2("%04x:", cs); DECODE_PRINTF2("%04x\n", ip); TRACE_AND_STEP(); M.x86.R_IP = ip; M.x86.R_CS = cs; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xeb ****************************************************************************/ void x86emuOp_jump_byte_IMM(u8 X86EMU_UNUSED(op1)) { u16 target; s8 offset; START_OF_INSTR(); 
DECODE_PRINTF("JMP\t"); offset = (s8)fetch_byte_imm(); target = (u16)(M.x86.R_IP + offset); DECODE_PRINTF2("%x\n", target); TRACE_AND_STEP(); M.x86.R_IP = target; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xec ****************************************************************************/ void x86emuOp_in_byte_AL_DX(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("IN\tAL,DX\n"); TRACE_AND_STEP(); M.x86.R_AL = (*sys_inb)(M.x86.R_DX); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xed ****************************************************************************/ void x86emuOp_in_word_AX_DX(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("IN\tEAX,DX\n"); } else { DECODE_PRINTF("IN\tAX,DX\n"); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { M.x86.R_EAX = (*sys_inl)(M.x86.R_DX); } else { M.x86.R_AX = (*sys_inw)(M.x86.R_DX); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xee ****************************************************************************/ void x86emuOp_out_byte_DX_AL(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("OUT\tDX,AL\n"); TRACE_AND_STEP(); (*sys_outb)(M.x86.R_DX, M.x86.R_AL); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xef ****************************************************************************/ void x86emuOp_out_word_DX_AX(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("OUT\tDX,EAX\n"); } else { DECODE_PRINTF("OUT\tDX,AX\n"); } TRACE_AND_STEP(); if (M.x86.mode & SYSMODE_PREFIX_DATA) { (*sys_outl)(M.x86.R_DX, M.x86.R_EAX); } else { 
(*sys_outw)(M.x86.R_DX, M.x86.R_AX); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf0 ****************************************************************************/ void x86emuOp_lock(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("LOCK:\n"); TRACE_AND_STEP(); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /*opcode 0xf1 ILLEGAL OPERATION */ /**************************************************************************** REMARKS: Handles opcode 0xf2 ****************************************************************************/ void x86emuOp_repne(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("REPNE\n"); TRACE_AND_STEP(); M.x86.mode |= SYSMODE_PREFIX_REPNE; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf3 ****************************************************************************/ void x86emuOp_repe(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("REPE\n"); TRACE_AND_STEP(); M.x86.mode |= SYSMODE_PREFIX_REPE; DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf4 ****************************************************************************/ void x86emuOp_halt(u8 X86EMU_UNUSED(op1)) { START_OF_INSTR(); DECODE_PRINTF("HALT\n"); TRACE_AND_STEP(); HALT_SYS(); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf5 ****************************************************************************/ void x86emuOp_cmc(u8 X86EMU_UNUSED(op1)) { /* complement the carry flag. 
*/ START_OF_INSTR(); DECODE_PRINTF("CMC\n"); TRACE_AND_STEP(); TOGGLE_FLAG(F_CF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf6 ****************************************************************************/ void x86emuOp_opcF6_byte_RM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; u8 *destreg; uint destoffset; u8 destval, srcval; /* long, drawn out code follows. Double switch for a total of 32 cases. */ START_OF_INSTR(); FETCH_DECODE_MODRM(mod, rh, rl); DECODE_PRINTF(opF6_names[rh]); if (mod < 3) { DECODE_PRINTF("BYTE PTR "); destoffset = decode_rmXX_address(mod, rl); destval = fetch_data_byte(destoffset); switch (rh) { case 0: /* test byte imm */ DECODE_PRINTF(","); srcval = fetch_byte_imm(); DECODE_PRINTF2("%02x\n", srcval); TRACE_AND_STEP(); test_byte(destval, srcval); break; case 1: DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = not_byte(destval); store_data_byte(destoffset, destval); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = neg_byte(destval); store_data_byte(destoffset, destval); break; case 4: DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_byte(destval); break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_byte(destval); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_byte(destval); break; default: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_byte(destval); break; } } else { /* mod=11 */ destreg = DECODE_RM_BYTE_REGISTER(rl); switch (rh) { case 0: /* test byte imm */ DECODE_PRINTF(","); srcval = fetch_byte_imm(); DECODE_PRINTF2("%02x\n", srcval); TRACE_AND_STEP(); test_byte(*destreg, srcval); break; case 1: DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = not_byte(*destreg); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = neg_byte(*destreg); break; case 4: 
DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_byte(*destreg); /*!!! */ break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_byte(*destreg); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_byte(*destreg); break; default: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_byte(*destreg); break; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf7 ****************************************************************************/ void x86emuOp_opcF7_word_RM(u8 X86EMU_UNUSED(op1)) { int mod, rl, rh; uint destoffset; START_OF_INSTR(); FETCH_DECODE_MODRM(mod, rh, rl); DECODE_PRINTF(opF6_names[rh]); if (mod < 3) { if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval, srcval; DECODE_PRINTF("DWORD PTR "); destoffset = decode_rmXX_address(mod, rl); destval = fetch_data_long(destoffset); switch (rh) { case 0: DECODE_PRINTF(","); srcval = fetch_long_imm(); DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); test_long(destval, srcval); break; case 1: DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F7\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = not_long(destval); store_data_long(destoffset, destval); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = neg_long(destval); store_data_long(destoffset, destval); break; case 4: DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_long(destval); break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_long(destval); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_long(destval); break; case 7: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_long(destval); break; } } else { u16 destval, srcval; DECODE_PRINTF("WORD PTR "); destoffset = decode_rmXX_address(mod, rl); destval = fetch_data_word(destoffset); switch (rh) { case 0: /* test word imm */ DECODE_PRINTF(","); srcval = fetch_word_imm(); DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); test_word(destval, srcval); break; case 1: DECODE_PRINTF("ILLEGAL 
OP MOD=00 RH=01 OP=F7\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = not_word(destval); store_data_word(destoffset, destval); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); destval = neg_word(destval); store_data_word(destoffset, destval); break; case 4: DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_word(destval); break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_word(destval); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_word(destval); break; case 7: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_word(destval); break; } } } else { /* mod=11 */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; u32 srcval; destreg = DECODE_RM_LONG_REGISTER(rl); switch (rh) { case 0: /* test word imm */ DECODE_PRINTF(","); srcval = fetch_long_imm(); DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); test_long(*destreg, srcval); break; case 1: DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = not_long(*destreg); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = neg_long(*destreg); break; case 4: DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_long(*destreg); /*!!! 
*/ break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_long(*destreg); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_long(*destreg); break; case 7: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_long(*destreg); break; } } else { u16 *destreg; u16 srcval; destreg = DECODE_RM_WORD_REGISTER(rl); switch (rh) { case 0: /* test word imm */ DECODE_PRINTF(","); srcval = fetch_word_imm(); DECODE_PRINTF2("%x\n", srcval); TRACE_AND_STEP(); test_word(*destreg, srcval); break; case 1: DECODE_PRINTF("ILLEGAL OP MOD=00 RH=01 OP=F6\n"); HALT_SYS(); break; case 2: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = not_word(*destreg); break; case 3: DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = neg_word(*destreg); break; case 4: DECODE_PRINTF("\n"); TRACE_AND_STEP(); mul_word(*destreg); /*!!! */ break; case 5: DECODE_PRINTF("\n"); TRACE_AND_STEP(); imul_word(*destreg); break; case 6: DECODE_PRINTF("\n"); TRACE_AND_STEP(); div_word(*destreg); break; case 7: DECODE_PRINTF("\n"); TRACE_AND_STEP(); idiv_word(*destreg); break; } } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf8 ****************************************************************************/ void x86emuOp_clc(u8 X86EMU_UNUSED(op1)) { /* clear the carry flag. */ START_OF_INSTR(); DECODE_PRINTF("CLC\n"); TRACE_AND_STEP(); CLEAR_FLAG(F_CF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xf9 ****************************************************************************/ void x86emuOp_stc(u8 X86EMU_UNUSED(op1)) { /* set the carry flag. 
*/ START_OF_INSTR(); DECODE_PRINTF("STC\n"); TRACE_AND_STEP(); SET_FLAG(F_CF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xfa ****************************************************************************/ void x86emuOp_cli(u8 X86EMU_UNUSED(op1)) { /* clear interrupts. */ START_OF_INSTR(); DECODE_PRINTF("CLI\n"); TRACE_AND_STEP(); CLEAR_FLAG(F_IF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xfb ****************************************************************************/ void x86emuOp_sti(u8 X86EMU_UNUSED(op1)) { /* enable interrupts. */ START_OF_INSTR(); DECODE_PRINTF("STI\n"); TRACE_AND_STEP(); SET_FLAG(F_IF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xfc ****************************************************************************/ void x86emuOp_cld(u8 X86EMU_UNUSED(op1)) { /* clear interrupts. */ START_OF_INSTR(); DECODE_PRINTF("CLD\n"); TRACE_AND_STEP(); CLEAR_FLAG(F_DF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xfd ****************************************************************************/ void x86emuOp_std(u8 X86EMU_UNUSED(op1)) { /* clear interrupts. */ START_OF_INSTR(); DECODE_PRINTF("STD\n"); TRACE_AND_STEP(); SET_FLAG(F_DF); DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xfe ****************************************************************************/ void x86emuOp_opcFE_byte_RM(u8 X86EMU_UNUSED(op1)) { int mod, rh, rl; u8 destval; uint destoffset; u8 *destreg; /* Yet another special case instruction. 
*/ START_OF_INSTR(); FETCH_DECODE_MODRM(mod, rh, rl); #ifdef DEBUG if (DEBUG_DECODE()) { /* XXX DECODE_PRINTF may be changed to something more general, so that it is important to leave the strings in the same format, even though the result is that the above test is done twice. */ switch (rh) { case 0: DECODE_PRINTF("INC\t"); break; case 1: DECODE_PRINTF("DEC\t"); break; case 2: case 3: case 4: case 5: case 6: case 7: DECODE_PRINTF2("ILLEGAL OP MAJOR OP 0xFE MINOR OP %x \n", mod); HALT_SYS(); break; } } #endif if (mod < 3) { DECODE_PRINTF("BYTE PTR "); destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF("\n"); destval = fetch_data_byte(destoffset); TRACE_AND_STEP(); if (rh == 0) destval = inc_byte(destval); else destval = dec_byte(destval); store_data_byte(destoffset, destval); } else { destreg = DECODE_RM_BYTE_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); if (rh == 0) *destreg = inc_byte(*destreg); else *destreg = dec_byte(*destreg); } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /**************************************************************************** REMARKS: Handles opcode 0xff ****************************************************************************/ void x86emuOp_opcFF_word_RM(u8 X86EMU_UNUSED(op1)) { int mod, rh, rl; uint destoffset = 0; u16 *destreg; u16 destval,destval2; /* Yet another special case instruction. */ START_OF_INSTR(); FETCH_DECODE_MODRM(mod, rh, rl); #ifdef DEBUG if (DEBUG_DECODE()) { /* XXX DECODE_PRINTF may be changed to something more general, so that it is important to leave the strings in the same format, even though the result is that the above test is done twice. 
*/ switch (rh) { case 0: if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("INC\tDWORD PTR "); } else { DECODE_PRINTF("INC\tWORD PTR "); } break; case 1: if (M.x86.mode & SYSMODE_PREFIX_DATA) { DECODE_PRINTF("DEC\tDWORD PTR "); } else { DECODE_PRINTF("DEC\tWORD PTR "); } break; case 2: DECODE_PRINTF("CALL\t "); break; case 3: DECODE_PRINTF("CALL\tFAR "); break; case 4: DECODE_PRINTF("JMP\t"); break; case 5: DECODE_PRINTF("JMP\tFAR "); break; case 6: DECODE_PRINTF("PUSH\t"); break; case 7: DECODE_PRINTF("ILLEGAL DECODING OF OPCODE FF\t"); HALT_SYS(); break; } } #endif if (mod < 3) { destoffset = decode_rmXX_address(mod, rl); DECODE_PRINTF("\n"); switch (rh) { case 0: /* inc word ptr ... */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval; destval = fetch_data_long(destoffset); TRACE_AND_STEP(); destval = inc_long(destval); store_data_long(destoffset, destval); } else { u16 destval; destval = fetch_data_word(destoffset); TRACE_AND_STEP(); destval = inc_word(destval); store_data_word(destoffset, destval); } break; case 1: /* dec word ptr ... */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval; destval = fetch_data_long(destoffset); TRACE_AND_STEP(); destval = dec_long(destval); store_data_long(destoffset, destval); } else { u16 destval; destval = fetch_data_word(destoffset); TRACE_AND_STEP(); destval = dec_word(destval); store_data_word(destoffset, destval); } break; case 2: /* call word ptr ... */ destval = fetch_data_word(destoffset); TRACE_AND_STEP(); push_word(M.x86.R_IP); M.x86.R_IP = destval; break; case 3: /* call far ptr ... */ destval = fetch_data_word(destoffset); destval2 = fetch_data_word(destoffset + 2); TRACE_AND_STEP(); push_word(M.x86.R_CS); M.x86.R_CS = destval2; push_word(M.x86.R_IP); M.x86.R_IP = destval; break; case 4: /* jmp word ptr ... */ destval = fetch_data_word(destoffset); TRACE_AND_STEP(); M.x86.R_IP = destval; break; case 5: /* jmp far ptr ... 
*/ destval = fetch_data_word(destoffset); destval2 = fetch_data_word(destoffset + 2); TRACE_AND_STEP(); M.x86.R_IP = destval; M.x86.R_CS = destval2; break; case 6: /* push word ptr ... */ if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 destval; destval = fetch_data_long(destoffset); TRACE_AND_STEP(); push_long(destval); } else { u16 destval; destval = fetch_data_word(destoffset); TRACE_AND_STEP(); push_word(destval); } break; } } else { switch (rh) { case 0: if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = inc_long(*destreg); } else { u16 *destreg; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = inc_word(*destreg); } break; case 1: if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = dec_long(*destreg); } else { u16 *destreg; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); *destreg = dec_word(*destreg); } break; case 2: /* call word ptr ... */ destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); push_word(M.x86.R_IP); M.x86.R_IP = *destreg; break; case 3: /* jmp far ptr ... */ DECODE_PRINTF("OPERATION UNDEFINED 0XFF \n"); TRACE_AND_STEP(); HALT_SYS(); break; case 4: /* jmp ... */ destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); M.x86.R_IP = (u16) (*destreg); break; case 5: /* jmp far ptr ... 
*/ DECODE_PRINTF("OPERATION UNDEFINED 0XFF \n"); TRACE_AND_STEP(); HALT_SYS(); break; case 6: if (M.x86.mode & SYSMODE_PREFIX_DATA) { u32 *destreg; destreg = DECODE_RM_LONG_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); push_long(*destreg); } else { u16 *destreg; destreg = DECODE_RM_WORD_REGISTER(rl); DECODE_PRINTF("\n"); TRACE_AND_STEP(); push_word(*destreg); } break; } } DECODE_CLEAR_SEGOVR(); END_OF_INSTR(); } /*************************************************************************** * Single byte operation code table: **************************************************************************/ void (*x86emu_optab[256])(u8) __attribute__ ((section(GOT2_TYPE))) = { /* 0x00 */ x86emuOp_genop_byte_RM_R, /* 0x01 */ x86emuOp_genop_word_RM_R, /* 0x02 */ x86emuOp_genop_byte_R_RM, /* 0x03 */ x86emuOp_genop_word_R_RM, /* 0x04 */ x86emuOp_genop_byte_AL_IMM, /* 0x05 */ x86emuOp_genop_word_AX_IMM, /* 0x06 */ x86emuOp_push_ES, /* 0x07 */ x86emuOp_pop_ES, /* 0x08 */ x86emuOp_genop_byte_RM_R, /* 0x09 */ x86emuOp_genop_word_RM_R, /* 0x0a */ x86emuOp_genop_byte_R_RM, /* 0x0b */ x86emuOp_genop_word_R_RM, /* 0x0c */ x86emuOp_genop_byte_AL_IMM, /* 0x0d */ x86emuOp_genop_word_AX_IMM, /* 0x0e */ x86emuOp_push_CS, /* 0x0f */ x86emuOp_two_byte, /* 0x10 */ x86emuOp_genop_byte_RM_R, /* 0x11 */ x86emuOp_genop_word_RM_R, /* 0x12 */ x86emuOp_genop_byte_R_RM, /* 0x13 */ x86emuOp_genop_word_R_RM, /* 0x14 */ x86emuOp_genop_byte_AL_IMM, /* 0x15 */ x86emuOp_genop_word_AX_IMM, /* 0x16 */ x86emuOp_push_SS, /* 0x17 */ x86emuOp_pop_SS, /* 0x18 */ x86emuOp_genop_byte_RM_R, /* 0x19 */ x86emuOp_genop_word_RM_R, /* 0x1a */ x86emuOp_genop_byte_R_RM, /* 0x1b */ x86emuOp_genop_word_R_RM, /* 0x1c */ x86emuOp_genop_byte_AL_IMM, /* 0x1d */ x86emuOp_genop_word_AX_IMM, /* 0x1e */ x86emuOp_push_DS, /* 0x1f */ x86emuOp_pop_DS, /* 0x20 */ x86emuOp_genop_byte_RM_R, /* 0x21 */ x86emuOp_genop_word_RM_R, /* 0x22 */ x86emuOp_genop_byte_R_RM, /* 0x23 */ x86emuOp_genop_word_R_RM, /* 0x24 */ 
x86emuOp_genop_byte_AL_IMM, /* 0x25 */ x86emuOp_genop_word_AX_IMM, /* 0x26 */ x86emuOp_segovr_ES, /* 0x27 */ x86emuOp_daa, /* 0x28 */ x86emuOp_genop_byte_RM_R, /* 0x29 */ x86emuOp_genop_word_RM_R, /* 0x2a */ x86emuOp_genop_byte_R_RM, /* 0x2b */ x86emuOp_genop_word_R_RM, /* 0x2c */ x86emuOp_genop_byte_AL_IMM, /* 0x2d */ x86emuOp_genop_word_AX_IMM, /* 0x2e */ x86emuOp_segovr_CS, /* 0x2f */ x86emuOp_das, /* 0x30 */ x86emuOp_genop_byte_RM_R, /* 0x31 */ x86emuOp_genop_word_RM_R, /* 0x32 */ x86emuOp_genop_byte_R_RM, /* 0x33 */ x86emuOp_genop_word_R_RM, /* 0x34 */ x86emuOp_genop_byte_AL_IMM, /* 0x35 */ x86emuOp_genop_word_AX_IMM, /* 0x36 */ x86emuOp_segovr_SS, /* 0x37 */ x86emuOp_aaa, /* 0x38 */ x86emuOp_genop_byte_RM_R, /* 0x39 */ x86emuOp_genop_word_RM_R, /* 0x3a */ x86emuOp_genop_byte_R_RM, /* 0x3b */ x86emuOp_genop_word_R_RM, /* 0x3c */ x86emuOp_genop_byte_AL_IMM, /* 0x3d */ x86emuOp_genop_word_AX_IMM, /* 0x3e */ x86emuOp_segovr_DS, /* 0x3f */ x86emuOp_aas, /* 0x40 */ x86emuOp_inc_register, /* 0x41 */ x86emuOp_inc_register, /* 0x42 */ x86emuOp_inc_register, /* 0x43 */ x86emuOp_inc_register, /* 0x44 */ x86emuOp_inc_register, /* 0x45 */ x86emuOp_inc_register, /* 0x46 */ x86emuOp_inc_register, /* 0x47 */ x86emuOp_inc_register, /* 0x48 */ x86emuOp_dec_register, /* 0x49 */ x86emuOp_dec_register, /* 0x4a */ x86emuOp_dec_register, /* 0x4b */ x86emuOp_dec_register, /* 0x4c */ x86emuOp_dec_register, /* 0x4d */ x86emuOp_dec_register, /* 0x4e */ x86emuOp_dec_register, /* 0x4f */ x86emuOp_dec_register, /* 0x50 */ x86emuOp_push_register, /* 0x51 */ x86emuOp_push_register, /* 0x52 */ x86emuOp_push_register, /* 0x53 */ x86emuOp_push_register, /* 0x54 */ x86emuOp_push_register, /* 0x55 */ x86emuOp_push_register, /* 0x56 */ x86emuOp_push_register, /* 0x57 */ x86emuOp_push_register, /* 0x58 */ x86emuOp_pop_register, /* 0x59 */ x86emuOp_pop_register, /* 0x5a */ x86emuOp_pop_register, /* 0x5b */ x86emuOp_pop_register, /* 0x5c */ x86emuOp_pop_register, /* 0x5d */ x86emuOp_pop_register, /* 
0x5e */ x86emuOp_pop_register, /* 0x5f */ x86emuOp_pop_register, /* 0x60 */ x86emuOp_push_all, /* 0x61 */ x86emuOp_pop_all, /* 0x62 */ x86emuOp_illegal_op, /* bound */ /* 0x63 */ x86emuOp_illegal_op, /* arpl */ /* 0x64 */ x86emuOp_segovr_FS, /* 0x65 */ x86emuOp_segovr_GS, /* 0x66 */ x86emuOp_prefix_data, /* 0x67 */ x86emuOp_prefix_addr, /* 0x68 */ x86emuOp_push_word_IMM, /* 0x69 */ x86emuOp_imul_word_IMM, /* 0x6a */ x86emuOp_push_byte_IMM, /* 0x6b */ x86emuOp_imul_byte_IMM, /* 0x6c */ x86emuOp_ins_byte, /* 0x6d */ x86emuOp_ins_word, /* 0x6e */ x86emuOp_outs_byte, /* 0x6f */ x86emuOp_outs_word, /* 0x70 */ x86emuOp_jump_near_cond, /* 0x71 */ x86emuOp_jump_near_cond, /* 0x72 */ x86emuOp_jump_near_cond, /* 0x73 */ x86emuOp_jump_near_cond, /* 0x74 */ x86emuOp_jump_near_cond, /* 0x75 */ x86emuOp_jump_near_cond, /* 0x76 */ x86emuOp_jump_near_cond, /* 0x77 */ x86emuOp_jump_near_cond, /* 0x78 */ x86emuOp_jump_near_cond, /* 0x79 */ x86emuOp_jump_near_cond, /* 0x7a */ x86emuOp_jump_near_cond, /* 0x7b */ x86emuOp_jump_near_cond, /* 0x7c */ x86emuOp_jump_near_cond, /* 0x7d */ x86emuOp_jump_near_cond, /* 0x7e */ x86emuOp_jump_near_cond, /* 0x7f */ x86emuOp_jump_near_cond, /* 0x80 */ x86emuOp_opc80_byte_RM_IMM, /* 0x81 */ x86emuOp_opc81_word_RM_IMM, /* 0x82 */ x86emuOp_opc82_byte_RM_IMM, /* 0x83 */ x86emuOp_opc83_word_RM_IMM, /* 0x84 */ x86emuOp_test_byte_RM_R, /* 0x85 */ x86emuOp_test_word_RM_R, /* 0x86 */ x86emuOp_xchg_byte_RM_R, /* 0x87 */ x86emuOp_xchg_word_RM_R, /* 0x88 */ x86emuOp_mov_byte_RM_R, /* 0x89 */ x86emuOp_mov_word_RM_R, /* 0x8a */ x86emuOp_mov_byte_R_RM, /* 0x8b */ x86emuOp_mov_word_R_RM, /* 0x8c */ x86emuOp_mov_word_RM_SR, /* 0x8d */ x86emuOp_lea_word_R_M, /* 0x8e */ x86emuOp_mov_word_SR_RM, /* 0x8f */ x86emuOp_pop_RM, /* 0x90 */ x86emuOp_nop, /* 0x91 */ x86emuOp_xchg_word_AX_register, /* 0x92 */ x86emuOp_xchg_word_AX_register, /* 0x93 */ x86emuOp_xchg_word_AX_register, /* 0x94 */ x86emuOp_xchg_word_AX_register, /* 0x95 */ x86emuOp_xchg_word_AX_register, /* 0x96 
*/ x86emuOp_xchg_word_AX_register, /* 0x97 */ x86emuOp_xchg_word_AX_register, /* 0x98 */ x86emuOp_cbw, /* 0x99 */ x86emuOp_cwd, /* 0x9a */ x86emuOp_call_far_IMM, /* 0x9b */ x86emuOp_wait, /* 0x9c */ x86emuOp_pushf_word, /* 0x9d */ x86emuOp_popf_word, /* 0x9e */ x86emuOp_sahf, /* 0x9f */ x86emuOp_lahf, /* 0xa0 */ x86emuOp_mov_AL_M_IMM, /* 0xa1 */ x86emuOp_mov_AX_M_IMM, /* 0xa2 */ x86emuOp_mov_M_AL_IMM, /* 0xa3 */ x86emuOp_mov_M_AX_IMM, /* 0xa4 */ x86emuOp_movs_byte, /* 0xa5 */ x86emuOp_movs_word, /* 0xa6 */ x86emuOp_cmps_byte, /* 0xa7 */ x86emuOp_cmps_word, /* 0xa8 */ x86emuOp_test_AL_IMM, /* 0xa9 */ x86emuOp_test_AX_IMM, /* 0xaa */ x86emuOp_stos_byte, /* 0xab */ x86emuOp_stos_word, /* 0xac */ x86emuOp_lods_byte, /* 0xad */ x86emuOp_lods_word, /* 0xac */ x86emuOp_scas_byte, /* 0xad */ x86emuOp_scas_word, /* 0xb0 */ x86emuOp_mov_byte_register_IMM, /* 0xb1 */ x86emuOp_mov_byte_register_IMM, /* 0xb2 */ x86emuOp_mov_byte_register_IMM, /* 0xb3 */ x86emuOp_mov_byte_register_IMM, /* 0xb4 */ x86emuOp_mov_byte_register_IMM, /* 0xb5 */ x86emuOp_mov_byte_register_IMM, /* 0xb6 */ x86emuOp_mov_byte_register_IMM, /* 0xb7 */ x86emuOp_mov_byte_register_IMM, /* 0xb8 */ x86emuOp_mov_word_register_IMM, /* 0xb9 */ x86emuOp_mov_word_register_IMM, /* 0xba */ x86emuOp_mov_word_register_IMM, /* 0xbb */ x86emuOp_mov_word_register_IMM, /* 0xbc */ x86emuOp_mov_word_register_IMM, /* 0xbd */ x86emuOp_mov_word_register_IMM, /* 0xbe */ x86emuOp_mov_word_register_IMM, /* 0xbf */ x86emuOp_mov_word_register_IMM, /* 0xc0 */ x86emuOp_opcC0_byte_RM_MEM, /* 0xc1 */ x86emuOp_opcC1_word_RM_MEM, /* 0xc2 */ x86emuOp_ret_near_IMM, /* 0xc3 */ x86emuOp_ret_near, /* 0xc4 */ x86emuOp_les_R_IMM, /* 0xc5 */ x86emuOp_lds_R_IMM, /* 0xc6 */ x86emuOp_mov_byte_RM_IMM, /* 0xc7 */ x86emuOp_mov_word_RM_IMM, /* 0xc8 */ x86emuOp_enter, /* 0xc9 */ x86emuOp_leave, /* 0xca */ x86emuOp_ret_far_IMM, /* 0xcb */ x86emuOp_ret_far, /* 0xcc */ x86emuOp_int3, /* 0xcd */ x86emuOp_int_IMM, /* 0xce */ x86emuOp_into, /* 0xcf */ 
x86emuOp_iret, /* 0xd0 */ x86emuOp_opcD0_byte_RM_1, /* 0xd1 */ x86emuOp_opcD1_word_RM_1, /* 0xd2 */ x86emuOp_opcD2_byte_RM_CL, /* 0xd3 */ x86emuOp_opcD3_word_RM_CL, /* 0xd4 */ x86emuOp_aam, /* 0xd5 */ x86emuOp_aad, /* 0xd6 */ x86emuOp_illegal_op, /* Undocumented SETALC instruction */ /* 0xd7 */ x86emuOp_xlat, /* 0xd8 */ NULL, /*x86emuOp_esc_coprocess_d8,*/ /* 0xd9 */ NULL, /*x86emuOp_esc_coprocess_d9,*/ /* 0xda */ NULL, /*x86emuOp_esc_coprocess_da,*/ /* 0xdb */ NULL, /*x86emuOp_esc_coprocess_db,*/ /* 0xdc */ NULL, /*x86emuOp_esc_coprocess_dc,*/ /* 0xdd */ NULL, /*x86emuOp_esc_coprocess_dd,*/ /* 0xde */ NULL, /*x86emuOp_esc_coprocess_de,*/ /* 0xdf */ NULL, /*x86emuOp_esc_coprocess_df,*/ /* 0xe0 */ x86emuOp_loopne, /* 0xe1 */ x86emuOp_loope, /* 0xe2 */ x86emuOp_loop, /* 0xe3 */ x86emuOp_jcxz, /* 0xe4 */ x86emuOp_in_byte_AL_IMM, /* 0xe5 */ x86emuOp_in_word_AX_IMM, /* 0xe6 */ x86emuOp_out_byte_IMM_AL, /* 0xe7 */ x86emuOp_out_word_IMM_AX, /* 0xe8 */ x86emuOp_call_near_IMM, /* 0xe9 */ x86emuOp_jump_near_IMM, /* 0xea */ x86emuOp_jump_far_IMM, /* 0xeb */ x86emuOp_jump_byte_IMM, /* 0xec */ x86emuOp_in_byte_AL_DX, /* 0xed */ x86emuOp_in_word_AX_DX, /* 0xee */ x86emuOp_out_byte_DX_AL, /* 0xef */ x86emuOp_out_word_DX_AX, /* 0xf0 */ x86emuOp_lock, /* 0xf1 */ x86emuOp_illegal_op, /* 0xf2 */ x86emuOp_repne, /* 0xf3 */ x86emuOp_repe, /* 0xf4 */ x86emuOp_halt, /* 0xf5 */ x86emuOp_cmc, /* 0xf6 */ x86emuOp_opcF6_byte_RM, /* 0xf7 */ x86emuOp_opcF7_word_RM, /* 0xf8 */ x86emuOp_clc, /* 0xf9 */ x86emuOp_stc, /* 0xfa */ x86emuOp_cli, /* 0xfb */ x86emuOp_sti, /* 0xfc */ x86emuOp_cld, /* 0xfd */ x86emuOp_std, /* 0xfe */ x86emuOp_opcFE_byte_RM, /* 0xff */ x86emuOp_opcFF_word_RM, }; #endif
gpl-2.0
linhphi9x94/android_kernel_pantech_msm8974
drivers/usb/gadget/u_bam.c
24
40488
/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/termios.h> #include <mach/msm_smd.h> #include <linux/netdevice.h> #include <mach/bam_dmux.h> #include <linux/debugfs.h> #include <linux/bitops.h> #include <linux/termios.h> #include <mach/usb_gadget_xport.h> #include <linux/usb/msm_hsusb.h> #include <mach/usb_bam.h> #include "u_rmnet.h" #define BAM_N_PORTS 1 #define BAM2BAM_N_PORTS 3 static struct workqueue_struct *gbam_wq; static int n_bam_ports; static int n_bam2bam_ports; static unsigned n_tx_req_queued; static unsigned bam_ch_ids[] = { 8 }; static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; #define BAM_PENDING_LIMIT 220 #define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000 #define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500 #define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300 #define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1 #define BAM_MUX_HDR 8 #define BAM_MUX_RX_Q_SIZE 16 #define BAM_MUX_TX_Q_SIZE 200 #define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */ #define DL_INTR_THRESHOLD 20 #define FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND #ifdef FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND //#define USE_ZERO_FLAG //if zero_flag should be used, define this feature. 
#endif static unsigned int bam_pending_limit = BAM_PENDING_LIMIT; module_param(bam_pending_limit, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD; module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD; module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT; module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD; module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE; module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE; module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR); static unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE; module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR); static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD; module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR); #define BAM_CH_OPENED BIT(0) #define BAM_CH_READY BIT(1) struct bam_ch_info { unsigned long flags; unsigned id; struct list_head tx_idle; struct sk_buff_head tx_skb_q; struct list_head rx_idle; struct sk_buff_head rx_skb_q; struct gbam_port *port; struct work_struct write_tobam_w; struct work_struct write_tohost_w; struct usb_request *rx_req; struct usb_request *tx_req; u32 src_pipe_idx; u32 dst_pipe_idx; u8 src_connection_idx; u8 dst_connection_idx; enum transport_type trans; struct usb_bam_connect_ipa_params ipa_params; /* stats */ unsigned int pending_with_bam; unsigned int tohost_drp_cnt; unsigned int tomodem_drp_cnt; unsigned int tx_len; unsigned int rx_len; unsigned long to_modem; unsigned long to_host; unsigned int rx_flow_control_disable; unsigned int rx_flow_control_enable; unsigned int 
rx_flow_control_triggered; unsigned int max_num_pkts_pending_with_bam; }; struct gbam_port { unsigned port_num; spinlock_t port_lock_ul; spinlock_t port_lock_dl; struct grmnet *port_usb; struct grmnet *gr; struct bam_ch_info data_ch; struct work_struct connect_w; struct work_struct disconnect_w; struct work_struct suspend_w; struct work_struct resume_w; }; static struct bam_portmaster { struct gbam_port *port; struct platform_driver pdrv; } bam_ports[BAM_N_PORTS]; struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS]; static void gbam_start_rx(struct gbam_port *port); static void gbam_start_endless_rx(struct gbam_port *port); static void gbam_start_endless_tx(struct gbam_port *port); static int gbam_peer_reset_cb(void *param); /*---------------misc functions---------------- */ static void gbam_free_requests(struct usb_ep *ep, struct list_head *head) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); usb_ep_free_request(ep, req); } } static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head, int num, void (*cb)(struct usb_ep *ep, struct usb_request *), gfp_t flags) { int i; struct usb_request *req; pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__, ep, head, num, cb); for (i = 0; i < num; i++) { req = usb_ep_alloc_request(ep, flags); if (!req) { pr_debug("%s: req allocated:%d\n", __func__, i); return list_empty(head) ? 
-ENOMEM : 0; } req->complete = cb; list_add(&req->list, head); } return 0; } /*--------------------------------------------- */ /*------------data_path----------------------------*/ static void gbam_write_data_tohost(struct gbam_port *port) { unsigned long flags; struct bam_ch_info *d = &port->data_ch; struct sk_buff *skb; int ret; struct usb_request *req; struct usb_ep *ep; #if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG) static bool useZLP = false; #endif spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } ep = port->port_usb->in; while (!list_empty(&d->tx_idle)) { #if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG) if(useZLP){ useZLP = false; req = list_first_entry(&d->tx_idle, struct usb_request, list); req->context = NULL; req->buf = NULL; req->length = 0; }else{ skb = __skb_dequeue(&d->tx_skb_q); if (!skb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } req = list_first_entry(&d->tx_idle, struct usb_request, list); req->context = skb; req->buf = skb->data; req->length = skb->len; } #else skb = __skb_dequeue(&d->tx_skb_q); if (!skb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } req = list_first_entry(&d->tx_idle, struct usb_request, list); req->context = skb; req->buf = skb->data; req->length = skb->len; #endif n_tx_req_queued++; if (n_tx_req_queued == dl_intr_threshold) { req->no_interrupt = 0; n_tx_req_queued = 0; } else { req->no_interrupt = 1; } // LS2_USB 20130723 pooyi send zelo length packet if((req->length % ep->maxpacket) == 0) req->zero = 1; else req->zero = 0; #if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) #ifdef USE_ZERO_FLAG // LS2_USB 20130723 pooyi send zelo length packet if(req->length && ((req->length % ep->maxpacket) == 0)) req->zero = 1; else req->zero = 0; #else if(req->length && ((req->length % ep->maxpacket) == 0)) useZLP = true; else useZLP = false; 
#endif #else /* Send ZLP in case packet length is multiple of maxpacksize */ req->zero = 1; #endif list_del(&req->list); spin_unlock(&port->port_lock_dl); ret = usb_ep_queue(ep, req, GFP_ATOMIC); spin_lock(&port->port_lock_dl); if (ret) { pr_err("%s: usb epIn failed with %d\n", __func__, ret); list_add(&req->list, &d->tx_idle); dev_kfree_skb_any(skb); break; } d->to_host++; } spin_unlock_irqrestore(&port->port_lock_dl, flags); } static void gbam_write_data_tohost_w(struct work_struct *w) { struct bam_ch_info *d; struct gbam_port *port; d = container_of(w, struct bam_ch_info, write_tohost_w); port = d->port; gbam_write_data_tohost(port); } void gbam_data_recv_cb(void *p, struct sk_buff *skb) { struct gbam_port *port = p; struct bam_ch_info *d = &port->data_ch; unsigned long flags; if (!skb) return; pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__, port, port->port_num, d, skb->len); spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) { d->tohost_drp_cnt++; if (printk_ratelimit()) pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n", __func__, d->tohost_drp_cnt); spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } __skb_queue_tail(&d->tx_skb_q, skb); spin_unlock_irqrestore(&port->port_lock_dl, flags); gbam_write_data_tohost(port); } void gbam_data_write_done(void *p, struct sk_buff *skb) { struct gbam_port *port = p; struct bam_ch_info *d = &port->data_ch; unsigned long flags; if (!skb) return; dev_kfree_skb_any(skb); spin_lock_irqsave(&port->port_lock_ul, flags); d->pending_with_bam--; pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__, port, d, d->to_modem, d->pending_with_bam, port->port_num); spin_unlock_irqrestore(&port->port_lock_ul, flags); queue_work(gbam_wq, &d->write_tobam_w); } static void gbam_data_write_tobam(struct work_struct *w) { struct gbam_port *port; 
struct bam_ch_info *d; struct sk_buff *skb; unsigned long flags; int ret; int qlen; d = container_of(w, struct bam_ch_info, write_tobam_w); port = d->port; spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } while (d->pending_with_bam < bam_pending_limit) { skb = __skb_dequeue(&d->rx_skb_q); if (!skb) break; d->pending_with_bam++; d->to_modem++; pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__, port, d, d->to_modem, d->pending_with_bam, port->port_num); spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = msm_bam_dmux_write(d->id, skb); spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { pr_debug("%s: write error:%d\n", __func__, ret); d->pending_with_bam--; d->to_modem--; d->tomodem_drp_cnt++; dev_kfree_skb_any(skb); break; } if (d->pending_with_bam > d->max_num_pkts_pending_with_bam) d->max_num_pkts_pending_with_bam = d->pending_with_bam; } qlen = d->rx_skb_q.qlen; spin_unlock_irqrestore(&port->port_lock_ul, flags); if (qlen < bam_mux_rx_fctrl_dis_thld) { if (d->rx_flow_control_triggered) { d->rx_flow_control_disable++; d->rx_flow_control_triggered = 0; } gbam_start_rx(port); } } /*-------------------------------------------------------------*/ static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req) { struct gbam_port *port = ep->driver_data; struct bam_ch_info *d; struct sk_buff *skb = req->context; int status = req->status; switch (status) { case 0: /* successful completion */ break; case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ dev_kfree_skb_any(skb); usb_ep_free_request(ep, req); return; default: pr_err("%s: data tx ep error %d\n", __func__, status); break; } #if defined(FEATURE_PANTECH_ANDROID_USB_ZLP_WORKAROUND) && !defined(USE_ZERO_FLAG) if(skb) dev_kfree_skb_any(skb); #else dev_kfree_skb_any(skb); #endif if (!port) return; spin_lock(&port->port_lock_dl); d = &port->data_ch; list_add_tail(&req->list, &d->tx_idle); 
spin_unlock(&port->port_lock_dl); queue_work(gbam_wq, &d->write_tohost_w); } static void gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) { struct gbam_port *port = ep->driver_data; struct bam_ch_info *d = &port->data_ch; struct sk_buff *skb = req->context; int status = req->status; int queue = 0; switch (status) { case 0: skb_put(skb, req->actual); queue = 1; break; case -ECONNRESET: case -ESHUTDOWN: /* cable disconnection */ dev_kfree_skb_any(skb); req->buf = 0; usb_ep_free_request(ep, req); return; default: if (printk_ratelimit()) pr_err("%s: %s response error %d, %d/%d\n", __func__, ep->name, status, req->actual, req->length); dev_kfree_skb_any(skb); break; } spin_lock(&port->port_lock_ul); if (queue) { __skb_queue_tail(&d->rx_skb_q, skb); queue_work(gbam_wq, &d->write_tobam_w); } /* TODO: Handle flow control gracefully by having * having call back mechanism from bam driver */ if (bam_mux_rx_fctrl_support && d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) { if (!d->rx_flow_control_triggered) { d->rx_flow_control_triggered = 1; d->rx_flow_control_enable++; } list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } spin_unlock(&port->port_lock_ul); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) { spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); return; } skb_reserve(skb, BAM_MUX_HDR); req->buf = skb->data; req->length = bam_mux_rx_req_size; req->context = skb; status = usb_ep_queue(ep, req, GFP_ATOMIC); if (status) { dev_kfree_skb_any(skb); if (printk_ratelimit()) pr_err("%s: data rx enqueue err %d\n", __func__, status); spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); spin_unlock(&port->port_lock_ul); } } static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req) { int status = req->status; pr_debug("%s status: %d\n", __func__, status); } static void gbam_endless_tx_complete(struct usb_ep *ep, 
struct usb_request *req) { int status = req->status; pr_debug("%s status: %d\n", __func__, status); } static void gbam_start_rx(struct gbam_port *port) { struct usb_request *req; struct bam_ch_info *d; struct usb_ep *ep; unsigned long flags; int ret; struct sk_buff *skb; spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d = &port->data_ch; ep = port->port_usb->out; while (port->port_usb && !list_empty(&d->rx_idle)) { if (bam_mux_rx_fctrl_support && d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) break; req = list_first_entry(&d->rx_idle, struct usb_request, list); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) break; skb_reserve(skb, BAM_MUX_HDR); list_del(&req->list); req->buf = skb->data; req->length = bam_mux_rx_req_size; req->context = skb; spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = usb_ep_queue(ep, req, GFP_ATOMIC); spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { dev_kfree_skb_any(skb); if (printk_ratelimit()) pr_err("%s: rx queue failed %d\n", __func__, ret); if (port->port_usb) list_add(&req->list, &d->rx_idle); else usb_ep_free_request(ep, req); break; } } spin_unlock_irqrestore(&port->port_lock_ul, flags); } static void gbam_start_endless_rx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_ul); if (!port->port_usb) { spin_unlock(&port->port_lock_ul); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: enqueue\n", __func__); status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC); if (status) pr_err("%s: error enqueuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_ul); } static void gbam_start_endless_tx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); pr_err("%s: port->port_usb is NULL", __func__); return; 
} pr_debug("%s: enqueue\n", __func__); status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC); if (status) pr_err("%s: error enqueuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_dl); } static void gbam_stop_endless_rx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_ul); if (!port->port_usb) { spin_unlock(&port->port_lock_ul); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: dequeue\n", __func__); status = usb_ep_dequeue(port->port_usb->out, d->rx_req); if (status) pr_err("%s: error dequeuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_ul); } static void gbam_stop_endless_tx(struct gbam_port *port) { struct bam_ch_info *d = &port->data_ch; int status; spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); pr_err("%s: port->port_usb is NULL", __func__); return; } pr_debug("%s: dequeue\n", __func__); status = usb_ep_dequeue(port->port_usb->in, d->tx_req); if (status) pr_err("%s: error dequeuing transfer, %d\n", __func__, status); spin_unlock(&port->port_lock_dl); } static void gbam_start(void *param, enum usb_bam_pipe_dir dir) { struct gbam_port *port = param; if (dir == USB_TO_PEER_PERIPHERAL) gbam_start_endless_rx(port); else gbam_start_endless_tx(port); } static void gbam_stop(void *param, enum usb_bam_pipe_dir dir) { struct gbam_port *port = param; if (dir == USB_TO_PEER_PERIPHERAL) gbam_stop_endless_rx(port); else gbam_stop_endless_tx(port); } static void gbam_start_io(struct gbam_port *port) { unsigned long flags; struct usb_ep *ep; int ret; struct bam_ch_info *d; pr_debug("%s: port:%p\n", __func__, port); spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d = &port->data_ch; ep = port->port_usb->out; ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size, gbam_epout_complete, GFP_ATOMIC); if (ret) { pr_err("%s: 
rx req allocation failed\n", __func__); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } spin_unlock_irqrestore(&port->port_lock_ul, flags); spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { gbam_free_requests(ep, &d->rx_idle); spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } ep = port->port_usb->in; ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size, gbam_epin_complete, GFP_ATOMIC); if (ret) { pr_err("%s: tx req allocation failed\n", __func__); gbam_free_requests(ep, &d->rx_idle); spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } spin_unlock_irqrestore(&port->port_lock_dl, flags); /* queue out requests */ gbam_start_rx(port); } static void gbam_notify(void *p, int event, unsigned long data) { switch (event) { case BAM_DMUX_RECEIVE: gbam_data_recv_cb(p, (struct sk_buff *)(data)); break; case BAM_DMUX_WRITE_DONE: gbam_data_write_done(p, (struct sk_buff *)(data)); break; } } static void gbam_free_buffers(struct gbam_port *port) { struct sk_buff *skb; unsigned long flags; struct bam_ch_info *d; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port || !port->port_usb) goto free_buf_out; d = &port->data_ch; gbam_free_requests(port->port_usb->in, &d->tx_idle); gbam_free_requests(port->port_usb->out, &d->rx_idle); while ((skb = __skb_dequeue(&d->tx_skb_q))) dev_kfree_skb_any(skb); while ((skb = __skb_dequeue(&d->rx_skb_q))) dev_kfree_skb_any(skb); free_buf_out: spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } static void gbam_disconnect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w); struct bam_ch_info *d = &port->data_ch; if (!test_bit(BAM_CH_OPENED, &d->flags)) return; msm_bam_dmux_close(d->id); clear_bit(BAM_CH_OPENED, &d->flags); } static void gbam2bam_disconnect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, disconnect_w); struct 
bam_ch_info *d = &port->data_ch; int ret; if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { ret = usb_bam_disconnect_ipa(&d->ipa_params); if (ret) pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n", __func__, ret); teth_bridge_disconnect(); } } static void gbam_connect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, connect_w); struct bam_ch_info *d = &port->data_ch; int ret; unsigned long flags; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port->port_usb) { spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!test_bit(BAM_CH_READY, &d->flags)) return; ret = msm_bam_dmux_open(d->id, port, gbam_notify); if (ret) { pr_err("%s: unable open bam ch:%d err:%d\n", __func__, d->id, ret); return; } set_bit(BAM_CH_OPENED, &d->flags); gbam_start_io(port); pr_debug("%s: done\n", __func__); } static void gbam2bam_connect_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, connect_w); struct teth_bridge_connect_params connect_params; struct bam_ch_info *d = &port->data_ch; u32 sps_params; ipa_notify_cb usb_notify_cb; void *priv; int ret; unsigned long flags; if (d->trans == USB_GADGET_XPORT_BAM2BAM) { usb_bam_reset_complete(); ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx); if (ret) { pr_err("%s: usb_bam_connect (src) failed: err:%d\n", __func__, ret); return; } ret = usb_bam_connect(d->dst_connection_idx, &d->dst_pipe_idx); if (ret) { pr_err("%s: usb_bam_connect (dst) failed: err:%d\n", __func__, ret); return; } } else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { ret = teth_bridge_init(&usb_notify_cb, &priv); if (ret) { pr_err("%s:teth_bridge_init() failed\n", __func__); return; } d->ipa_params.notify = usb_notify_cb; d->ipa_params.priv = priv; d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC; d->ipa_params.client 
= IPA_CLIENT_USB_PROD; d->ipa_params.dir = USB_TO_PEER_PERIPHERAL; ret = usb_bam_connect_ipa(&d->ipa_params); if (ret) { pr_err("%s: usb_bam_connect_ipa failed: err:%d\n", __func__, ret); return; } d->ipa_params.client = IPA_CLIENT_USB_CONS; d->ipa_params.dir = PEER_PERIPHERAL_TO_USB; ret = usb_bam_connect_ipa(&d->ipa_params); if (ret) { pr_err("%s: usb_bam_connect_ipa failed: err:%d\n", __func__, ret); return; } connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl; connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl; connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET; ret = teth_bridge_connect(&connect_params); if (ret) { pr_err("%s:teth_bridge_connect() failed\n", __func__); return; } } spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (!port->port_usb) { pr_debug("%s: usb cable is disconnected, exiting\n", __func__); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC); if (!d->rx_req) { spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); pr_err("%s: out of memory\n", __func__); return; } d->rx_req->context = port; d->rx_req->complete = gbam_endless_rx_complete; d->rx_req->length = 0; d->rx_req->no_interrupt = 1; sps_params = (MSM_SPS_MODE | d->src_pipe_idx | MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; d->rx_req->udc_priv = sps_params; d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!d->tx_req) { pr_err("%s: out of memory\n", __func__); return; } d->tx_req->context = port; d->tx_req->complete = gbam_endless_tx_complete; d->tx_req->length = 0; d->tx_req->no_interrupt = 1; sps_params = (MSM_SPS_MODE | d->dst_pipe_idx | MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER; d->tx_req->udc_priv = sps_params; /* queue in & out requests */ gbam_start_endless_rx(port); 
gbam_start_endless_tx(port); if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) { /* Register for peer reset callback */ usb_bam_register_peer_reset_cb(gbam_peer_reset_cb, port); ret = usb_bam_client_ready(true); if (ret) { pr_err("%s: usb_bam_client_ready failed: err:%d\n", __func__, ret); return; } } pr_debug("%s: done\n", __func__); } static int gbam_wake_cb(void *param) { struct gbam_port *port = (struct gbam_port *)param; struct bam_ch_info *d; struct f_rmnet *dev; dev = port_to_rmnet(port->gr); d = &port->data_ch; pr_debug("%s: woken up by peer\n", __func__); return usb_gadget_wakeup(dev->cdev->gadget); } static void gbam2bam_suspend_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, suspend_w); struct bam_ch_info *d = &port->data_ch; pr_debug("%s: suspend work started\n", __func__); usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port); if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) { usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port); usb_bam_suspend(&d->ipa_params); } } static void gbam2bam_resume_work(struct work_struct *w) { struct gbam_port *port = container_of(w, struct gbam_port, resume_w); struct bam_ch_info *d = &port->data_ch; pr_debug("%s: resume work started\n", __func__); usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL); if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) usb_bam_resume(&d->ipa_params); } static int gbam_peer_reset_cb(void *param) { struct gbam_port *port = (struct gbam_port *)param; struct bam_ch_info *d; struct f_rmnet *dev; struct usb_gadget *gadget; int ret; bool reenable_eps = false; dev = port_to_rmnet(port->gr); d = &port->data_ch; gadget = dev->cdev->gadget; pr_debug("%s: reset by peer\n", __func__); /* Disable the relevant EPs if currently EPs are enabled */ if (port->port_usb && port->port_usb->in && port->port_usb->in->driver_data) { usb_ep_disable(port->port_usb->out); usb_ep_disable(port->port_usb->in); port->port_usb->in->driver_data = 
NULL; port->port_usb->out->driver_data = NULL; reenable_eps = true; } /* Disable BAM */ msm_hw_bam_disable(1); /* Reset BAM */ ret = usb_bam_a2_reset(0); if (ret) { pr_err("%s: BAM reset failed %d\n", __func__, ret); goto reenable_eps; } /* Enable BAM */ msm_hw_bam_disable(0); reenable_eps: /* Re-Enable the relevant EPs, if EPs were originally enabled */ if (reenable_eps) { ret = usb_ep_enable(port->port_usb->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, port->port_usb->in); return ret; } port->port_usb->in->driver_data = port; ret = usb_ep_enable(port->port_usb->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, port->port_usb->out); port->port_usb->in->driver_data = 0; return ret; } port->port_usb->out->driver_data = port; gbam_start_endless_rx(port); gbam_start_endless_tx(port); } /* Unregister the peer reset callback */ if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) usb_bam_register_peer_reset_cb(NULL, NULL); return 0; } /* BAM data channel ready, allow attempt to open */ static int gbam_data_ch_probe(struct platform_device *pdev) { struct gbam_port *port; struct bam_ch_info *d; int i; unsigned long flags; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_bam_ports; i++) { port = bam_ports[i].port; d = &port->data_ch; if (!strncmp(bam_ch_names[i], pdev->name, BAM_DMUX_CH_NAME_MAX_LEN)) { set_bit(BAM_CH_READY, &d->flags); /* if usb is online, try opening bam_ch */ spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (port->port_usb) queue_work(gbam_wq, &port->connect_w); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); break; } } return 0; } /* BAM data channel went inactive, so close it */ static int gbam_data_ch_remove(struct platform_device *pdev) { struct gbam_port *port; struct bam_ch_info *d; struct usb_ep *ep_in = NULL; struct usb_ep *ep_out = NULL; unsigned long flags; int i; pr_debug("%s: 
name:%s\n", __func__, pdev->name); for (i = 0; i < n_bam_ports; i++) { if (!strncmp(bam_ch_names[i], pdev->name, BAM_DMUX_CH_NAME_MAX_LEN)) { port = bam_ports[i].port; d = &port->data_ch; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); if (port->port_usb) { ep_in = port->port_usb->in; ep_out = port->port_usb->out; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (ep_in) usb_ep_fifo_flush(ep_in); if (ep_out) usb_ep_fifo_flush(ep_out); gbam_free_buffers(port); msm_bam_dmux_close(d->id); /* bam dmux will free all pending skbs */ d->pending_with_bam = 0; clear_bit(BAM_CH_READY, &d->flags); clear_bit(BAM_CH_OPENED, &d->flags); } } return 0; } static void gbam_port_free(int portno) { struct gbam_port *port = bam_ports[portno].port; struct platform_driver *pdrv = &bam_ports[portno].pdrv; if (port) { kfree(port); platform_driver_unregister(pdrv); } } static void gbam2bam_port_free(int portno) { struct gbam_port *port = bam2bam_ports[portno]; kfree(port); } static int gbam_port_alloc(int portno) { struct gbam_port *port; struct bam_ch_info *d; struct platform_driver *pdrv; port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; /* port initialization */ spin_lock_init(&port->port_lock_ul); spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam_connect_work); INIT_WORK(&port->disconnect_w, gbam_disconnect_work); /* data ch */ d = &port->data_ch; d->port = port; INIT_LIST_HEAD(&d->tx_idle); INIT_LIST_HEAD(&d->rx_idle); INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam); INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w); skb_queue_head_init(&d->tx_skb_q); skb_queue_head_init(&d->rx_skb_q); d->id = bam_ch_ids[portno]; bam_ports[portno].port = port; pdrv = &bam_ports[portno].pdrv; pdrv->probe = gbam_data_ch_probe; pdrv->remove = gbam_data_ch_remove; pdrv->driver.name = bam_ch_names[portno]; pdrv->driver.owner = THIS_MODULE; 
platform_driver_register(pdrv); pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } static int gbam2bam_port_alloc(int portno) { struct gbam_port *port; struct bam_ch_info *d; port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; /* port initialization */ spin_lock_init(&port->port_lock_ul); spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam2bam_connect_work); INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work); INIT_WORK(&port->suspend_w, gbam2bam_suspend_work); INIT_WORK(&port->resume_w, gbam2bam_resume_work); /* data ch */ d = &port->data_ch; d->port = port; bam2bam_ports[portno] = port; pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } #if defined(CONFIG_DEBUG_FS) #define DEBUG_BUF_SIZE 1024 static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gbam_port *port; struct bam_ch_info *d; char *buf; unsigned long flags; int ret; int i; int temp = 0; buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < n_bam_ports; i++) { port = bam_ports[i].port; if (!port) continue; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); d = &port->data_ch; temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, "#PORT:%d port:%p data_ch:%p#\n" "dpkts_to_usbhost: %lu\n" "dpkts_to_modem: %lu\n" "dpkts_pwith_bam: %u\n" "to_usbhost_dcnt: %u\n" "tomodem__dcnt: %u\n" "rx_flow_control_disable_count: %u\n" "rx_flow_control_enable_count: %u\n" "rx_flow_control_triggered: %u\n" "max_num_pkts_pending_with_bam: %u\n" "tx_buf_len: %u\n" "rx_buf_len: %u\n" "data_ch_open: %d\n" "data_ch_ready: %d\n", i, port, &port->data_ch, d->to_host, d->to_modem, d->pending_with_bam, d->tohost_drp_cnt, d->tomodem_drp_cnt, d->rx_flow_control_disable, d->rx_flow_control_enable, d->rx_flow_control_triggered, d->max_num_pkts_pending_with_bam, d->tx_skb_q.qlen, d->rx_skb_q.qlen, 
test_bit(BAM_CH_OPENED, &d->flags), test_bit(BAM_CH_READY, &d->flags)); spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); kfree(buf); return ret; } static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct gbam_port *port; struct bam_ch_info *d; int i; unsigned long flags; for (i = 0; i < n_bam_ports; i++) { port = bam_ports[i].port; if (!port) continue; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); d = &port->data_ch; d->to_host = 0; d->to_modem = 0; d->pending_with_bam = 0; d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; d->rx_flow_control_disable = 0; d->rx_flow_control_enable = 0; d->rx_flow_control_triggered = 0; d->max_num_pkts_pending_with_bam = 0; spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); } return count; } const struct file_operations gbam_stats_ops = { .read = gbam_read_stats, .write = gbam_reset_stats, }; struct dentry *gbam_dent; static void gbam_debugfs_init(void) { struct dentry *dfile; gbam_dent = debugfs_create_dir("usb_rmnet", 0); if (!gbam_dent || IS_ERR(gbam_dent)) return; dfile = debugfs_create_file("status", 0444, gbam_dent, 0, &gbam_stats_ops); if (!dfile || IS_ERR(dfile)) { debugfs_remove(gbam_dent); gbam_dent = NULL; return; } } static void gbam_debugfs_remove(void) { debugfs_remove_recursive(gbam_dent); } #else static inline void gbam_debugfs_init(void) {} static inline void gbam_debugfs_remove(void) {} #endif void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; unsigned long flags; struct bam_ch_info *d; pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { pr_err("%s: invalid bam portno#%d\n", __func__, port_num); return; } if ((trans == USB_GADGET_XPORT_BAM2BAM || trans == 
USB_GADGET_XPORT_BAM2BAM_IPA) && port_num >= n_bam2bam_ports) { pr_err("%s: invalid bam2bam portno#%d\n", __func__, port_num); return; } if (!gr) { pr_err("%s: grmnet port is null\n", __func__); return; } if (trans == USB_GADGET_XPORT_BAM) port = bam_ports[port_num].port; else port = bam2bam_ports[port_num]; d = &port->data_ch; port->gr = gr; if (trans == USB_GADGET_XPORT_BAM) gbam_free_buffers(port); spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); port->port_usb = 0; n_tx_req_queued = 0; spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); /* disable endpoints */ usb_ep_disable(gr->out); usb_ep_disable(gr->in); gr->in->driver_data = NULL; gr->out->driver_data = NULL; if (trans == USB_GADGET_XPORT_BAM || trans == USB_GADGET_XPORT_BAM2BAM_IPA) queue_work(gbam_wq, &port->disconnect_w); else if (trans == USB_GADGET_XPORT_BAM2BAM) { if (port_num == 0) { if (usb_bam_client_ready(false)) { pr_err("%s: usb_bam_client_ready failed\n", __func__); } } } } int gbam_connect(struct grmnet *gr, u8 port_num, enum transport_type trans, u8 src_connection_idx, u8 dst_connection_idx) { struct gbam_port *port; struct bam_ch_info *d; int ret; unsigned long flags; pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } if ((trans == USB_GADGET_XPORT_BAM2BAM || trans == USB_GADGET_XPORT_BAM2BAM_IPA) && port_num >= n_bam2bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } if (!gr) { pr_err("%s: grmnet port is null\n", __func__); return -ENODEV; } if (trans == USB_GADGET_XPORT_BAM) port = bam_ports[port_num].port; else port = bam2bam_ports[port_num]; d = &port->data_ch; ret = usb_ep_enable(gr->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, gr->in); return ret; } gr->in->driver_data = port; ret = usb_ep_enable(gr->out); if 
(ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, gr->out); gr->in->driver_data = 0; return ret; } gr->out->driver_data = port; spin_lock_irqsave(&port->port_lock_ul, flags); spin_lock(&port->port_lock_dl); port->port_usb = gr; if (trans == USB_GADGET_XPORT_BAM) { d->to_host = 0; d->to_modem = 0; d->pending_with_bam = 0; d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; d->rx_flow_control_disable = 0; d->rx_flow_control_enable = 0; d->rx_flow_control_triggered = 0; d->max_num_pkts_pending_with_bam = 0; } spin_unlock(&port->port_lock_dl); spin_unlock_irqrestore(&port->port_lock_ul, flags); if (trans == USB_GADGET_XPORT_BAM2BAM) { port->gr = gr; d->src_connection_idx = src_connection_idx; d->dst_connection_idx = dst_connection_idx; } else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) { port->gr = gr; d->ipa_params.src_pipe = &(d->src_pipe_idx); d->ipa_params.dst_pipe = &(d->dst_pipe_idx); d->ipa_params.src_idx = src_connection_idx; d->ipa_params.dst_idx = dst_connection_idx; } d->trans = trans; queue_work(gbam_wq, &port->connect_w); return 0; } int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port) { int i; int ret; pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n", __func__, no_bam_port, no_bam2bam_port); if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS || no_bam2bam_port > BAM2BAM_N_PORTS) { pr_err("%s: Invalid num of ports count:%d,%d\n", __func__, no_bam_port, no_bam2bam_port); return -EINVAL; } gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); if (!gbam_wq) { pr_err("%s: Unable to create workqueue gbam_wq\n", __func__); return -ENOMEM; } for (i = 0; i < no_bam_port; i++) { n_bam_ports++; ret = gbam_port_alloc(i); if (ret) { n_bam_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_bam_ports; } } for (i = 0; i < no_bam2bam_port; i++) { n_bam2bam_ports++; ret = gbam2bam_port_alloc(i); if (ret) { n_bam2bam_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, 
i); goto free_bam_ports; } } gbam_debugfs_init(); return 0; free_bam_ports: for (i = 0; i < n_bam_ports; i++) gbam_port_free(i); for (i = 0; i < n_bam2bam_ports; i++) gbam2bam_port_free(i); destroy_workqueue(gbam_wq); return ret; } void gbam_cleanup(void) { gbam_debugfs_remove(); } void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; struct bam_ch_info *d; if (trans != USB_GADGET_XPORT_BAM2BAM && trans != USB_GADGET_XPORT_BAM2BAM_IPA) return; port = bam2bam_ports[port_num]; d = &port->data_ch; pr_debug("%s: suspended port %d\n", __func__, port_num); queue_work(gbam_wq, &port->suspend_w); } void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; struct bam_ch_info *d; if (trans != USB_GADGET_XPORT_BAM2BAM && trans != USB_GADGET_XPORT_BAM2BAM_IPA) return; port = bam2bam_ports[port_num]; d = &port->data_ch; pr_debug("%s: resumed port %d\n", __func__, port_num); queue_work(gbam_wq, &port->resume_w); }
gpl-2.0
nanocad-lab/vipzone-glibc
iconvdata/ibm803.c
24
1146
/* Conversion from and to IBM803. Copyright (C) 2005 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Masahide Washizawa <washi@jp.ibm.com>, 2005. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <stdint.h> /* Get the conversion table. */ #define TABLES <ibm803.h> #define CHARSET_NAME "IBM803//" #define HAS_HOLES 1 /* Not all 256 character are defined. */ #include <8bit-gap.c>
gpl-2.0
AndreyPopovNew/asuswrt-merlin-rt-n
release/src/router/samba-3.5.8/source4/heimdal_build/hdb-glue.c
24
1089
/* Unix SMB/CIFS implementation. provide glue functions between heimdal and samba Copyright (C) Andrew Bartlett <abartlet@samba.org> 2009 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "includes.h" #include "system/network.h" #include "system/kerberos.h" #include "lib/socket/netif.h" #include "param/param.h" #include "heimdal/lib/hdb/hdb_locl.h" krb5_error_code hdb_sqlite_create(krb5_context context, HDB **db, const char *argument) { return EINVAL; }
gpl-2.0
AndyPhoenix9879/SkyMelon-msm8939
drivers/staging/prima/CORE/MAC/src/pe/lim/limSession.c
280
20210
/* * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /**========================================================================= \file limSession.c \brief implementation for lim Session related APIs \author Sunit Bhatia ========================================================================*/ /*-------------------------------------------------------------------------- Include Files ------------------------------------------------------------------------*/ #include "aniGlobal.h" #include "limDebug.h" #include "limSession.h" #include "limUtils.h" #if defined(FEATURE_WLAN_ESE) && !defined(FEATURE_WLAN_ESE_UPLOAD) #include "eseApi.h" #endif /*-------------------------------------------------------------------------- \brief peInitBeaconParams() - Initialize the beaconParams structure \param tpPESession - pointer to the session context or NULL if session can not be created. 
\return void \sa --------------------------------------------------------------------------*/ void peInitBeaconParams(tpAniSirGlobal pMac, tpPESession psessionEntry) { psessionEntry->beaconParams.beaconInterval = 0; psessionEntry->beaconParams.fShortPreamble = 0; psessionEntry->beaconParams.llaCoexist = 0; psessionEntry->beaconParams.llbCoexist = 0; psessionEntry->beaconParams.llgCoexist = 0; psessionEntry->beaconParams.ht20Coexist = 0; psessionEntry->beaconParams.llnNonGFCoexist = 0; psessionEntry->beaconParams.fRIFSMode = 0; psessionEntry->beaconParams.fLsigTXOPProtectionFullSupport = 0; psessionEntry->beaconParams.gHTObssMode = 0; // Number of legacy STAs associated vos_mem_set((void*)&psessionEntry->gLim11bParams, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLim11aParams, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLim11gParams, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLimNonGfParams, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLimHt20Params, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLimLsigTxopParams, sizeof(tLimProtStaParams), 0); vos_mem_set((void*)&psessionEntry->gLimOlbcParams, sizeof(tLimProtStaParams), 0); } /*-------------------------------------------------------------------------- \brief peCreateSession() - creates a new PE session given the BSSID This function returns the session context and the session ID if the session corresponding to the passed BSSID is found in the PE session table. \param pMac - pointer to global adapter context \param bssid - BSSID of the new session \param sessionId -session ID is returned here, if session is created. \return tpPESession - pointer to the session context or NULL if session can not be created. 
\sa --------------------------------------------------------------------------*/ tpPESession peCreateSession(tpAniSirGlobal pMac, tANI_U8 *bssid , tANI_U8* sessionId, tANI_U16 numSta) { tANI_U8 i; for(i =0; i < pMac->lim.maxBssId; i++) { /* Find first free room in session table */ if(pMac->lim.gpSession[i].valid == FALSE) { vos_mem_set((void*)&pMac->lim.gpSession[i], sizeof(tPESession), 0); //Allocate space for Station Table for this session. pMac->lim.gpSession[i].dph.dphHashTable.pHashTable = vos_mem_vmalloc( sizeof(tpDphHashNode)*numSta); if ( NULL == pMac->lim.gpSession[i].dph.dphHashTable.pHashTable ) { limLog(pMac, LOGE, FL("memory allocate for size %lu failed!"), (long unsigned int) sizeof(tpDphHashNode)*numSta); return NULL; } pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray = vos_mem_vmalloc( sizeof(tDphHashNode)*numSta); if ( NULL == pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray ) { limLog(pMac, LOGE, FL("memory allocate failed for Node array" "of size %lu"), (long unsigned int) sizeof(tDphHashNode)*numSta); vos_mem_vfree(pMac->lim.gpSession[i].dph.dphHashTable.pHashTable); pMac->lim.gpSession[i].dph.dphHashTable.pHashTable = NULL; return NULL; } pMac->lim.gpSession[i].dph.dphHashTable.size = numSta; dphHashTableClassInit(pMac, &pMac->lim.gpSession[i].dph.dphHashTable); pMac->lim.gpSession[i].gpLimPeerIdxpool = vos_mem_vmalloc(sizeof( *pMac->lim.gpSession[i].gpLimPeerIdxpool) * (numSta+1)); if ( NULL == pMac->lim.gpSession[i].gpLimPeerIdxpool ) { limLog(pMac, LOGE, FL("memory allocate failed " "for peerId pool of size %lu!"), (long unsigned int) sizeof(*pMac->lim.gpSession[i].gpLimPeerIdxpool) * (numSta+1)); vos_mem_vfree(pMac->lim.gpSession[i].dph.dphHashTable.pHashTable); vos_mem_vfree(pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray); pMac->lim.gpSession[i].dph.dphHashTable.pHashTable = NULL; pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray = NULL; return NULL; } vos_mem_set(pMac->lim.gpSession[i].gpLimPeerIdxpool, 
sizeof(*pMac->lim.gpSession[i].gpLimPeerIdxpool) * (numSta+1), 0); pMac->lim.gpSession[i].freePeerIdxHead = 0; pMac->lim.gpSession[i].freePeerIdxTail = 0; pMac->lim.gpSession[i].gLimNumOfCurrentSTAs = 0; /* Copy the BSSID to the session table */ sirCopyMacAddr(pMac->lim.gpSession[i].bssId, bssid); pMac->lim.gpSession[i].valid = TRUE; /* Intialize the SME and MLM states to IDLE */ pMac->lim.gpSession[i].limMlmState = eLIM_MLM_IDLE_STATE; pMac->lim.gpSession[i].limSmeState = eLIM_SME_IDLE_STATE; pMac->lim.gpSession[i].limCurrentAuthType = eSIR_OPEN_SYSTEM; peInitBeaconParams(pMac, &pMac->lim.gpSession[i]); #ifdef WLAN_FEATURE_VOWIFI_11R pMac->lim.gpSession[i].is11Rconnection = FALSE; #endif #ifdef FEATURE_WLAN_ESE pMac->lim.gpSession[i].isESEconnection = FALSE; #endif #if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_ESE || defined(FEATURE_WLAN_LFR) pMac->lim.gpSession[i].isFastTransitionEnabled = FALSE; #endif #ifdef FEATURE_WLAN_LFR pMac->lim.gpSession[i].isFastRoamIniFeatureEnabled = FALSE; #endif *sessionId = i; pMac->lim.gpSession[i].gLimPhyMode = WNI_CFG_PHY_MODE_11G; //TODO :Check with the team what should be default mode /* Initialize CB mode variables when session is created */ pMac->lim.gpSession[i].htSupportedChannelWidthSet = 0; pMac->lim.gpSession[i].htRecommendedTxWidthSet = 0; pMac->lim.gpSession[i].htSecondaryChannelOffset = 0; #ifdef FEATURE_WLAN_TDLS vos_mem_set(pMac->lim.gpSession[i].peerAIDBitmap, sizeof(pMac->lim.gpSession[i].peerAIDBitmap), 0); pMac->lim.gpSession[i].tdlsChanSwitProhibited = 0; #endif pMac->lim.gpSession[i].fWaitForProbeRsp = 0; pMac->lim.gpSession[i].fIgnoreCapsChange = 0; limLog(pMac, LOG1, FL("Create a new sessionId (%d) with BSSID: " MAC_ADDRESS_STR " Max No. of STA %d"), pMac->lim.gpSession[i].peSessionId, MAC_ADDR_ARRAY(bssid), numSta); return(&pMac->lim.gpSession[i]); } } limLog(pMac, LOGE, FL("Session can not be created.. 
Reached Max permitted sessions ")); return NULL; } /*-------------------------------------------------------------------------- \brief peFindSessionByBssid() - looks up the PE session given the BSSID. This function returns the session context and the session ID if the session corresponding to the given BSSID is found in the PE session table. \param pMac - pointer to global adapter context \param bssid - BSSID of the session \param sessionId -session ID is returned here, if session is found. \return tpPESession - pointer to the session context or NULL if session is not found. \sa --------------------------------------------------------------------------*/ tpPESession peFindSessionByBssid(tpAniSirGlobal pMac, tANI_U8* bssid, tANI_U8* sessionId) { tANI_U8 i; for(i =0; i < pMac->lim.maxBssId; i++) { /* If BSSID matches return corresponding tables address*/ if( (pMac->lim.gpSession[i].valid) && (sirCompareMacAddr(pMac->lim.gpSession[i].bssId, bssid))) { *sessionId = i; return(&pMac->lim.gpSession[i]); } } limLog(pMac, LOG4, FL("Session lookup fails for BSSID: ")); limPrintMacAddr(pMac, bssid, LOG4); return(NULL); } /*-------------------------------------------------------------------------- \brief peFindSessionByBssIdx() - looks up the PE session given the bssIdx. This function returns the session context if the session corresponding to the given bssIdx is found in the PE session table. \param pMac - pointer to global adapter context \param bssIdx - bss index of the session \return tpPESession - pointer to the session context or NULL if session is not found. 
\sa --------------------------------------------------------------------------*/ tpPESession peFindSessionByBssIdx(tpAniSirGlobal pMac, tANI_U8 bssIdx) { tANI_U8 i; for (i = 0; i < pMac->lim.maxBssId; i++) { /* If BSSID matches return corresponding tables address*/ if ( (pMac->lim.gpSession[i].valid) && (pMac->lim.gpSession[i].bssIdx == bssIdx)) { return &pMac->lim.gpSession[i]; } } limLog(pMac, LOG4, FL("Session lookup fails for bssIdx: %d"), bssIdx); return NULL; } /*-------------------------------------------------------------------------- \brief peFindSessionBySessionId() - looks up the PE session given the session ID. This function returns the session context if the session corresponding to the given session ID is found in the PE session table. \param pMac - pointer to global adapter context \param sessionId -session ID for which session context needs to be looked up. \return tpPESession - pointer to the session context or NULL if session is not found. \sa --------------------------------------------------------------------------*/ tpPESession peFindSessionBySessionId(tpAniSirGlobal pMac , tANI_U8 sessionId) { if(sessionId >= pMac->lim.maxBssId) { limLog(pMac, LOGE, FL("Invalid sessionId: %d "), sessionId); return(NULL); } if((pMac->lim.gpSession[sessionId].valid == TRUE)) { return(&pMac->lim.gpSession[sessionId]); } return(NULL); } /*-------------------------------------------------------------------------- \brief peFindSessionByStaId() - looks up the PE session given staid. This function returns the session context and the session ID if the session corresponding to the given StaId is found in the PE session table. \param pMac - pointer to global adapter context \param staid - StaId of the session \param sessionId -session ID is returned here, if session is found. \return tpPESession - pointer to the session context or NULL if session is not found. 
\sa --------------------------------------------------------------------------*/ tpPESession peFindSessionByStaId(tpAniSirGlobal pMac, tANI_U8 staid, tANI_U8* sessionId) { tANI_U8 i, j; for(i =0; i < pMac->lim.maxBssId; i++) { if(pMac->lim.gpSession[i].valid) { for(j = 0; j < pMac->lim.gpSession[i].dph.dphHashTable.size; j++) { if((pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray[j].valid) && (pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray[j].added) && (staid == pMac->lim.gpSession[i].dph.dphHashTable.pDphNodeArray[j].staIndex)) { *sessionId = i; return(&pMac->lim.gpSession[i]); } } } } limLog(pMac, LOG4, FL("Session lookup fails for StaId: %d\n "), staid); return(NULL); } /*-------------------------------------------------------------------------- \brief peDeleteSession() - deletes the PE session given the session ID. \param pMac - pointer to global adapter context \param sessionId -session ID of the session which needs to be deleted. \sa --------------------------------------------------------------------------*/ void peDeleteSession(tpAniSirGlobal pMac, tpPESession psessionEntry) { tANI_U16 i = 0; tANI_U16 n; TX_TIMER *timer_ptr; eHalStatus lock_status = eHAL_STATUS_SUCCESS; limLog(pMac, LOGW, FL("Trying to delete a session %d Opmode %d BssIdx %d" " BSSID: " MAC_ADDRESS_STR), psessionEntry->peSessionId, psessionEntry->operMode, psessionEntry->bssIdx, MAC_ADDR_ARRAY(psessionEntry->bssId)); for (n = 0; n < pMac->lim.maxStation; n++) { timer_ptr = &pMac->lim.limTimers.gpLimCnfWaitTimer[n]; if(psessionEntry->peSessionId == timer_ptr->sessionId) { if(VOS_TRUE == tx_timer_running(timer_ptr)) { tx_timer_deactivate(timer_ptr); } } } #ifdef WLAN_FEATURE_11AC /* Unblock the MuBF for other session if the MuBf session is deleted */ if(psessionEntry->txMuBformee) { pMac->isMuBfsessionexist = FALSE; } #endif if (psessionEntry->pLimStartBssReq != NULL) { vos_mem_free( psessionEntry->pLimStartBssReq ); psessionEntry->pLimStartBssReq = NULL; } 
if(psessionEntry->pLimJoinReq != NULL) { vos_mem_free( psessionEntry->pLimJoinReq ); psessionEntry->pLimJoinReq = NULL; } if(psessionEntry->pLimReAssocReq != NULL) { vos_mem_free( psessionEntry->pLimReAssocReq ); psessionEntry->pLimReAssocReq = NULL; } if(psessionEntry->pLimMlmJoinReq != NULL) { vos_mem_free( psessionEntry->pLimMlmJoinReq ); psessionEntry->pLimMlmJoinReq = NULL; } lock_status = pe_AcquireGlobalLock(&pMac->lim); if (eHAL_STATUS_SUCCESS == lock_status) { if (psessionEntry->dph.dphHashTable.pHashTable != NULL) { vos_mem_vfree(psessionEntry->dph.dphHashTable.pHashTable); psessionEntry->dph.dphHashTable.pHashTable = NULL; } } pe_ReleaseGlobalLock(&pMac->lim); if(psessionEntry->dph.dphHashTable.pDphNodeArray != NULL) { vos_mem_vfree(psessionEntry->dph.dphHashTable.pDphNodeArray); psessionEntry->dph.dphHashTable.pDphNodeArray = NULL; } if(psessionEntry->gpLimPeerIdxpool != NULL) { vos_mem_vfree(psessionEntry->gpLimPeerIdxpool); psessionEntry->gpLimPeerIdxpool = NULL; } if(psessionEntry->beacon != NULL) { vos_mem_free( psessionEntry->beacon); psessionEntry->beacon = NULL; } if(psessionEntry->assocReq != NULL) { vos_mem_free( psessionEntry->assocReq); psessionEntry->assocReq = NULL; } if(psessionEntry->assocRsp != NULL) { vos_mem_free( psessionEntry->assocRsp); psessionEntry->assocRsp = NULL; } if(psessionEntry->parsedAssocReq != NULL) { // Cleanup the individual allocation first for (i=0; i < psessionEntry->dph.dphHashTable.size; i++) { if ( psessionEntry->parsedAssocReq[i] != NULL ) { if( ((tpSirAssocReq)(psessionEntry->parsedAssocReq[i]))->assocReqFrame ) { vos_mem_free(((tpSirAssocReq) (psessionEntry->parsedAssocReq[i]))->assocReqFrame); ((tpSirAssocReq)(psessionEntry->parsedAssocReq[i]))->assocReqFrame = NULL; ((tpSirAssocReq)(psessionEntry->parsedAssocReq[i]))->assocReqFrameLength = 0; } vos_mem_free(psessionEntry->parsedAssocReq[i]); psessionEntry->parsedAssocReq[i] = NULL; } } // Cleanup the whole block vos_mem_free(psessionEntry->parsedAssocReq); 
psessionEntry->parsedAssocReq = NULL; } if (NULL != psessionEntry->limAssocResponseData) { vos_mem_free( psessionEntry->limAssocResponseData); psessionEntry->limAssocResponseData = NULL; } #if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR) if (NULL != psessionEntry->pLimMlmReassocRetryReq) { vos_mem_free( psessionEntry->pLimMlmReassocRetryReq); psessionEntry->pLimMlmReassocRetryReq = NULL; } #endif if (NULL != psessionEntry->pLimMlmReassocReq) { vos_mem_free( psessionEntry->pLimMlmReassocReq); psessionEntry->pLimMlmReassocReq = NULL; } #if defined(FEATURE_WLAN_ESE) && !defined(FEATURE_WLAN_ESE_UPLOAD) limCleanupEseCtxt(pMac, psessionEntry); #endif psessionEntry->valid = FALSE; return; } /*-------------------------------------------------------------------------- \brief peFindSessionByPeerSta() - looks up the PE session given the Station Address. This function returns the session context and the session ID if the session corresponding to the given station address is found in the PE session table. \param pMac - pointer to global adapter context \param sa - Peer STA Address of the session \param sessionId -session ID is returned here, if session is found. \return tpPESession - pointer to the session context or NULL if session is not found. \sa --------------------------------------------------------------------------*/ tpPESession peFindSessionByPeerSta(tpAniSirGlobal pMac, tANI_U8* sa, tANI_U8* sessionId) { tANI_U8 i; tpDphHashNode pSta; tANI_U16 aid; for(i =0; i < pMac->lim.maxBssId; i++) { if( (pMac->lim.gpSession[i].valid)) { pSta = dphLookupHashEntry(pMac, sa, &aid, &pMac->lim.gpSession[i].dph.dphHashTable); if (pSta != NULL) { *sessionId = i; return &pMac->lim.gpSession[i]; } } } limLog(pMac, LOG1, FL("Session lookup fails for Peer StaId: ")); limPrintMacAddr(pMac, sa, LOG1); return NULL; }
gpl-2.0
mitake/linux
drivers/i2c/i2c-dev.c
280
18662
/* i2c-dev.c - i2c-bus driver, char device interface Copyright (C) 1995-97 Simon G. Vogl Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. But I have used so much of his original code and ideas that it seems only fair to recognize him as co-author -- Frodo */ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/i2c.h> #include <linux/i2c-dev.h> #include <linux/jiffies.h> #include <linux/uaccess.h> /* * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a * slave (i2c_client) with which messages will be exchanged. It's coupled * with a character special file which is accessed by user mode drivers. * * The list of i2c_dev structures is parallel to the i2c_adapter lists * maintained by the driver model, and is updated using bus notifications. 
*/ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; struct device *dev; }; #define I2C_MINORS 256 static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); static struct i2c_dev *i2c_dev_get_by_minor(unsigned index) { struct i2c_dev *i2c_dev; spin_lock(&i2c_dev_list_lock); list_for_each_entry(i2c_dev, &i2c_dev_list, list) { if (i2c_dev->adap->nr == index) goto found; } i2c_dev = NULL; found: spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) { struct i2c_dev *i2c_dev; if (adap->nr >= I2C_MINORS) { printk(KERN_ERR "i2c-dev: Out of device minors (%d)\n", adap->nr); return ERR_PTR(-ENODEV); } i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return ERR_PTR(-ENOMEM); i2c_dev->adap = adap; spin_lock(&i2c_dev_list_lock); list_add_tail(&i2c_dev->list, &i2c_dev_list); spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static void return_i2c_dev(struct i2c_dev *i2c_dev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); kfree(i2c_dev); } static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); if (!i2c_dev) return -ENODEV; return sprintf(buf, "%s\n", i2c_dev->adap->name); } static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL); /* ------------------------------------------------------------------------- */ /* * After opening an instance of this character special file, a file * descriptor starts out associated only with an i2c_adapter (and bus). * * Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg * traffic to any devices on the bus used by that adapter. That's because * the i2c_msg vectors embed all the addressing information they need, and * are submitted directly to an i2c_adapter. However, SMBus-only adapters * don't support that interface. 
* * To use read()/write() system calls on that file descriptor, or to use * SMBus interfaces (and work with SMBus-only hosts!), you must first issue * an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. That configures an anonymous * (never registered) i2c_client so it holds the addressing information * needed by those system calls and by this SMBus interface. */ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { char *tmp; int ret; struct i2c_client *client = file->private_data; if (count > 8192) count = 8192; tmp = kmalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n", iminor(file->f_path.dentry->d_inode), count); ret = i2c_master_recv(client, tmp, count); if (ret >= 0) ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; kfree(tmp); return ret; } static ssize_t i2cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { int ret; char *tmp; struct i2c_client *client = file->private_data; if (count > 8192) count = 8192; tmp = memdup_user(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n", iminor(file->f_path.dentry->d_inode), count); ret = i2c_master_send(client, tmp, count); kfree(tmp); return ret; } static int i2cdev_check(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); if (!client || client->addr != *(unsigned int *)addrp) return 0; return dev->driver ? 
-EBUSY : 0; } /* walk up mux tree */ static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, i2cdev_check); if (!result && parent) result = i2cdev_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2cdev_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2cdev_check_mux_children); else result = i2cdev_check(dev, addrp); return result; } /* This address checking function differs from the one in i2c-core in that it considers an address with a registered device, but no driver bound to it, as NOT busy. */ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2cdev_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2cdev_check_mux_children); return result; } static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client, unsigned long arg) { struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_msg *rdwr_pa; u8 __user **data_ptrs; int i, res; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, sizeof(rdwr_arg))) return -EFAULT; /* Put an arbitrary limit on the number of messages that can * be sent at once */ if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = memdup_user(rdwr_arg.msgs, rdwr_arg.nmsgs * sizeof(struct i2c_msg)); if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa); data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL); if (data_ptrs == NULL) { kfree(rdwr_pa); return -ENOMEM; } res = 0; for (i = 0; i < rdwr_arg.nmsgs; i++) { /* Limit the size of the message to a sane amount */ if (rdwr_pa[i].len > 8192) { res = -EINVAL; break; } data_ptrs[i] = (u8 __user 
*)rdwr_pa[i].buf; rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len); if (IS_ERR(rdwr_pa[i].buf)) { res = PTR_ERR(rdwr_pa[i].buf); break; } /* * If the message length is received from the slave (similar * to SMBus block read), we must ensure that the buffer will * be large enough to cope with a message length of * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus * drivers allow. The first byte in the buffer must be * pre-filled with the number of extra bytes, which must be * at least one to hold the message length, but can be * greater (for example to account for a checksum byte at * the end of the message.) */ if (rdwr_pa[i].flags & I2C_M_RECV_LEN) { if (!(rdwr_pa[i].flags & I2C_M_RD) || rdwr_pa[i].buf[0] < 1 || rdwr_pa[i].len < rdwr_pa[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { res = -EINVAL; break; } rdwr_pa[i].len = rdwr_pa[i].buf[0]; } } if (res < 0) { int j; for (j = 0; j < i; ++j) kfree(rdwr_pa[j].buf); kfree(data_ptrs); kfree(rdwr_pa); return res; } res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs); while (i-- > 0) { if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) { if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf, rdwr_pa[i].len)) res = -EFAULT; } kfree(rdwr_pa[i].buf); } kfree(data_ptrs); kfree(rdwr_pa); return res; } static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, unsigned long arg) { struct i2c_smbus_ioctl_data data_arg; union i2c_smbus_data temp; int datasize, res; if (copy_from_user(&data_arg, (struct i2c_smbus_ioctl_data __user *) arg, sizeof(struct i2c_smbus_ioctl_data))) return -EFAULT; if ((data_arg.size != I2C_SMBUS_BYTE) && (data_arg.size != I2C_SMBUS_QUICK) && (data_arg.size != I2C_SMBUS_BYTE_DATA) && (data_arg.size != I2C_SMBUS_WORD_DATA) && (data_arg.size != I2C_SMBUS_PROC_CALL) && (data_arg.size != I2C_SMBUS_BLOCK_DATA) && (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) && (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) && (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) { dev_dbg(&client->adapter->dev, "size out of 
range (%x) in ioctl I2C_SMBUS.\n", data_arg.size); return -EINVAL; } /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, so the check is valid if size==I2C_SMBUS_QUICK too. */ if ((data_arg.read_write != I2C_SMBUS_READ) && (data_arg.read_write != I2C_SMBUS_WRITE)) { dev_dbg(&client->adapter->dev, "read_write out of range (%x) in ioctl I2C_SMBUS.\n", data_arg.read_write); return -EINVAL; } /* Note that command values are always valid! */ if ((data_arg.size == I2C_SMBUS_QUICK) || ((data_arg.size == I2C_SMBUS_BYTE) && (data_arg.read_write == I2C_SMBUS_WRITE))) /* These are special: we do not use data */ return i2c_smbus_xfer(client->adapter, client->addr, client->flags, data_arg.read_write, data_arg.command, data_arg.size, NULL); if (data_arg.data == NULL) { dev_dbg(&client->adapter->dev, "data is NULL pointer in ioctl I2C_SMBUS.\n"); return -EINVAL; } if ((data_arg.size == I2C_SMBUS_BYTE_DATA) || (data_arg.size == I2C_SMBUS_BYTE)) datasize = sizeof(data_arg.data->byte); else if ((data_arg.size == I2C_SMBUS_WORD_DATA) || (data_arg.size == I2C_SMBUS_PROC_CALL)) datasize = sizeof(data_arg.data->word); else /* size == smbus block, i2c block, or block proc. call */ datasize = sizeof(data_arg.data->block); if ((data_arg.size == I2C_SMBUS_PROC_CALL) || (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) || (data_arg.read_write == I2C_SMBUS_WRITE)) { if (copy_from_user(&temp, data_arg.data, datasize)) return -EFAULT; } if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) { /* Convert old I2C block commands to the new convention. This preserves binary compatibility. 
*/ data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA; if (data_arg.read_write == I2C_SMBUS_READ) temp.block[0] = I2C_SMBUS_BLOCK_MAX; } res = i2c_smbus_xfer(client->adapter, client->addr, client->flags, data_arg.read_write, data_arg.command, data_arg.size, &temp); if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) || (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) || (data_arg.read_write == I2C_SMBUS_READ))) { if (copy_to_user(data_arg.data, &temp, datasize)) return -EFAULT; } return res; } static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg); switch (cmd) { case I2C_SLAVE: case I2C_SLAVE_FORCE: /* NOTE: devices set up to work with "new style" drivers * can't use I2C_SLAVE, even when the device node is not * bound to a driver. Only I2C_SLAVE_FORCE will work. * * Setting the PEC flag here won't affect kernel drivers, * which will be using the i2c_client node registered with * the driver model core. Likewise, when that client has * the PEC flag already set, the i2c-dev driver won't see * (or use) this setting. 
*/ if ((arg > 0x3ff) || (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) return -EINVAL; if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg)) return -EBUSY; /* REVISIT: address could become busy later */ client->addr = arg; return 0; case I2C_TENBIT: if (arg) client->flags |= I2C_M_TEN; else client->flags &= ~I2C_M_TEN; return 0; case I2C_PEC: if (arg) client->flags |= I2C_CLIENT_PEC; else client->flags &= ~I2C_CLIENT_PEC; return 0; case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (unsigned long __user *)arg); case I2C_RDWR: return i2cdev_ioctl_rdrw(client, arg); case I2C_SMBUS: return i2cdev_ioctl_smbus(client, arg); case I2C_RETRIES: client->adapter->retries = arg; break; case I2C_TIMEOUT: /* For historical reasons, user-space sets the timeout * value in units of 10 ms. */ client->adapter->timeout = msecs_to_jiffies(arg * 10); break; default: /* NOTE: returning a fault code here could cause trouble * in buggy userspace code. Some old kernel bugs returned * zero in this case, and userspace code might accidentally * have depended on that bug. */ return -ENOTTY; } return 0; } static int i2cdev_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct i2c_client *client; struct i2c_adapter *adap; struct i2c_dev *i2c_dev; i2c_dev = i2c_dev_get_by_minor(minor); if (!i2c_dev) return -ENODEV; adap = i2c_get_adapter(i2c_dev->adap->nr); if (!adap) return -ENODEV; /* This creates an anonymous i2c_client, which may later be * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE. * * This client is ** NEVER REGISTERED ** with the driver model * or I2C core code!! It just holds private copies of addressing * information and maybe a PEC flag. 
*/ client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { i2c_put_adapter(adap); return -ENOMEM; } snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr); client->adapter = adap; file->private_data = client; return 0; } static int i2cdev_release(struct inode *inode, struct file *file) { struct i2c_client *client = file->private_data; i2c_put_adapter(client->adapter); kfree(client); file->private_data = NULL; return 0; } static const struct file_operations i2cdev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = i2cdev_read, .write = i2cdev_write, .unlocked_ioctl = i2cdev_ioctl, .open = i2cdev_open, .release = i2cdev_release, }; /* ------------------------------------------------------------------------- */ static struct class *i2c_dev_class; static int i2cdev_attach_adapter(struct device *dev, void *dummy) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; int res; if (dev->type != &i2c_adapter_type) return 0; adap = to_i2c_adapter(dev); i2c_dev = get_free_i2c_dev(adap); if (IS_ERR(i2c_dev)) return PTR_ERR(i2c_dev); /* register this i2c device with the driver core */ i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, MKDEV(I2C_MAJOR, adap->nr), NULL, "i2c-%d", adap->nr); if (IS_ERR(i2c_dev->dev)) { res = PTR_ERR(i2c_dev->dev); goto error; } res = device_create_file(i2c_dev->dev, &dev_attr_name); if (res) goto error_destroy; pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; error_destroy: device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); error: return_i2c_dev(i2c_dev); return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; if (dev->type != &i2c_adapter_type) return 0; adap = to_i2c_adapter(dev); i2c_dev = i2c_dev_get_by_minor(adap->nr); if (!i2c_dev) /* attach_adapter must have failed */ return 0; device_remove_file(i2c_dev->dev, &dev_attr_name); return_i2c_dev(i2c_dev); device_destroy(i2c_dev_class, 
MKDEV(I2C_MAJOR, adap->nr)); pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); return 0; } static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: return i2cdev_attach_adapter(dev, NULL); case BUS_NOTIFY_DEL_DEVICE: return i2cdev_detach_adapter(dev, NULL); } return 0; } static struct notifier_block i2cdev_notifier = { .notifier_call = i2cdev_notifier_call, }; /* ------------------------------------------------------------------------- */ /* * module load/unload record keeping */ static int __init i2c_dev_init(void) { int res; printk(KERN_INFO "i2c /dev entries driver\n"); res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops); if (res) goto out; i2c_dev_class = class_create(THIS_MODULE, "i2c-dev"); if (IS_ERR(i2c_dev_class)) { res = PTR_ERR(i2c_dev_class); goto out_unreg_chrdev; } /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); if (res) goto out_unreg_class; /* Bind to already existing adapters right away */ i2c_for_each_dev(NULL, i2cdev_attach_adapter); return 0; out_unreg_class: class_destroy(i2c_dev_class); out_unreg_chrdev: unregister_chrdev(I2C_MAJOR, "i2c"); out: printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); return res; } static void __exit i2c_dev_exit(void) { bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2cdev_detach_adapter); class_destroy(i2c_dev_class); unregister_chrdev(I2C_MAJOR, "i2c"); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C /dev entries driver"); MODULE_LICENSE("GPL"); module_init(i2c_dev_init); module_exit(i2c_dev_exit);
gpl-2.0
mehrvarz/msm-kitkat-tm-usbhost-charge
drivers/staging/prima/CORE/BAP/src/bapApiLinkSupervision.c
1304
21704
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /*=========================================================================== b a p A p i LinkSupervision . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules "platform independent" Data path functions. 
The functions externalized by this module are to be called ONLY by other WLAN modules (HDD) that properly register with the BAP Layer initially. DEPENDENCIES: Are listed for each API below. Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. when who what, where, why ---------- --- -------------------------------------------------------- 2008-03-25 arulv Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ //#include "wlan_qct_tl.h" #include "vos_trace.h" //I need the TL types and API #include "wlan_qct_tl.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" #include "bapApiTimer.h" /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ #if 1 //*BT-AMP packet LLC OUI value*/ static const v_U8_t WLANBAP_BT_AMP_OUI[] = {0x00, 0x19, 0x58 }; /*LLC header value*/ static v_U8_t WLANBAP_LLC_HEADER[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00 }; #endif /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ 
/*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and Documentation * -------------------------------------------------------------------------*/ VOS_STATUS WLANBAP_AcquireLSPacket( ptBtampContext pBtampCtx, vos_pkt_t **ppPacket, v_U16_t size, tANI_BOOLEAN isLsReq ) { VOS_STATUS vosStatus; vos_pkt_t *pPacket; WLANBAP_8023HeaderType w8023Header; v_U8_t aucLLCHeader[WLANBAP_LLC_HEADER_LEN]; v_U16_t headerLength; /* The 802.3 frame length*/ v_U16_t protoType; v_U8_t *pData = NULL; if(isLsReq) { protoType = WLANTL_BT_AMP_TYPE_LS_REQ; } else { protoType = WLANTL_BT_AMP_TYPE_LS_REP; } //If success, vosTxLsPacket is the packet and pData points to the head. vosStatus = vos_pkt_get_packet( &pPacket, VOS_PKT_TYPE_TX_802_11_MGMT,size, 1, VOS_TRUE, NULL, NULL ); if( VOS_IS_STATUS_SUCCESS( vosStatus ) ) { vosStatus = vos_pkt_reserve_head( pPacket, (v_VOID_t *)&pData, size ); if( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "%s: failed to reserve size = %d\n",__func__, size ); vos_pkt_return_packet( pPacket ); } } if( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "WLANBAP_LinkSupervisionTimerHandler failed to get vos_pkt\n" ); return vosStatus; } // Form the 802.3 header vos_mem_copy( w8023Header.vDA, pBtampCtx->peer_mac_addr, VOS_MAC_ADDR_SIZE); vos_mem_copy( w8023Header.vSA, pBtampCtx->self_mac_addr, VOS_MAC_ADDR_SIZE); headerLength = WLANBAP_LLC_HEADER_LEN; /* Now the 802.3 length field is big-endian?! 
*/ w8023Header.usLenType = vos_cpu_to_be16(headerLength); /* Now adjust the protocol type bytes*/ protoType = vos_cpu_to_be16( protoType); /* Now form the LLC header */ vos_mem_copy(aucLLCHeader, WLANBAP_LLC_HEADER, sizeof(WLANBAP_LLC_HEADER)); vos_mem_copy(&aucLLCHeader[WLANBAP_LLC_OUI_OFFSET], WLANBAP_BT_AMP_OUI, WLANBAP_LLC_OUI_SIZE); vos_mem_copy(&aucLLCHeader[WLANBAP_LLC_PROTO_TYPE_OFFSET], &protoType, //WLANBAP_BT_AMP_TYPE_LS_REQ WLANBAP_LLC_PROTO_TYPE_SIZE); /* Push on the LLC header */ vos_pkt_push_head(pPacket, aucLLCHeader, WLANBAP_LLC_HEADER_LEN); /* Push on the 802.3 header */ vos_pkt_push_head(pPacket, &w8023Header, sizeof(w8023Header)); *ppPacket = pPacket; return vosStatus; } /*=========================================================================== FUNCTION WLANBAP_InitLinkSupervision DESCRIPTION This API will be called when Link Supervision module is to be initialized when connected at BAP PARAMETERS btampHandle: The BT-AMP PAL handle returned in WLANBAP_GetNewHndl. RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: BAP handle is NULL VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ #define TX_LS_DATALEN 32 VOS_STATUS WLANBAP_InitLinkSupervision ( ptBtampHandle btampHandle ) { VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; ptBtampContext pBtampCtx = (ptBtampContext) btampHandle; vos_pkt_t *pLSReqPacket; vos_pkt_t *pLSRepPacket; v_U16_t lsPktln; if ( NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } #if 0 /* Initialize Link supervision data structure */ vos_mem_set(pLsInfo, sizeof(tBtampLS),0); /* Allocate memory for Static Tx Data */ pLsInfo->pTxPktData = vos_mem_malloc(sizeof(tBtampLsPktData)+TX_LS_DATALEN); /* Initialize Static data for LS pkt Tx */ pLsInfo->pTxPktData->BufLen = 
TX_LS_DATALEN; vos_mem_copy (&pLsInfo->pTxPktData->pBuf, LsTxData, pLsInfo->pTxPktData->BufLen); #endif pBtampCtx->lsReqPktPending = VOS_FALSE; pBtampCtx->retries = 0; vosStatus = WLANBAP_AcquireLSPacket( pBtampCtx, &pLSReqPacket,32, TRUE ); if( VOS_IS_STATUS_SUCCESS( vosStatus ) ) { pBtampCtx->lsReqPacket = pLSReqPacket; } else { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:AcquireLSPacket failed\n",__func__); pBtampCtx->lsReqPacket = NULL; return vosStatus; } vosStatus = WLANBAP_AcquireLSPacket( pBtampCtx, &pLSRepPacket,32,FALSE ); if( VOS_IS_STATUS_SUCCESS( vosStatus ) ) { pBtampCtx->lsRepPacket = pLSRepPacket; } else { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:AcquireLSPacket failed\n",__func__); pBtampCtx->lsRepPacket = NULL; return vosStatus; } vosStatus = vos_pkt_get_packet_length(pBtampCtx->lsRepPacket,&lsPktln); if ( VOS_STATUS_SUCCESS != vosStatus ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:vos_pkt_get_length error",__func__); return VOS_STATUS_E_FAULT; } pBtampCtx->lsPktln = lsPktln; /* Start Link Supervision Timer if not configured for infinite */ if (pBtampCtx->bapLinkSupervisionTimerInterval) { vosStatus = WLANBAP_StartLinkSupervisionTimer (pBtampCtx, pBtampCtx->bapLinkSupervisionTimerInterval * WLANBAP_BREDR_BASEBAND_SLOT_TIME); } else { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:No LS configured for infinite",__func__); } return vosStatus; } /*=========================================================================== FUNCTION WLANBAP_DeInitLinkSupervision DESCRIPTION This API will be called when Link Supervision module is to be stopped after disconnected at BAP PARAMETERS btampHandle: The BT-AMP PAL handle returned in WLANBAP_GetNewHndl. 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: BAP handle is NULL VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_DeInitLinkSupervision ( ptBtampHandle btampHandle ) { VOS_STATUS vosStatus = VOS_STATUS_SUCCESS; ptBtampContext pBtampCtx = (ptBtampContext) btampHandle; if ( NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In: %s", __func__); vosStatus = WLANBAP_StopLinkSupervisionTimer(pBtampCtx); /*Free the vos packet*/ if ( pBtampCtx->lsRepPacket ) { vosStatus = vos_pkt_return_packet(pBtampCtx->lsRepPacket); pBtampCtx->lsRepPacket = NULL; } if ( pBtampCtx->lsReqPacket ) { vosStatus = vos_pkt_return_packet(pBtampCtx->lsReqPacket); pBtampCtx->lsReqPacket = NULL; } return vosStatus; } /*=========================================================================== FUNCTION WLANBAP_RxProcLsPkt DESCRIPTION This API will be called when Link Supervision frames are received at BAP PARAMETERS btampHandle: The BT-AMP PAL handle returned in WLANBAP_GetNewHndl. pucAC: Pointer to return the access category vosDataBuff: The data buffer containing the 802.3 frame to be translated to BT HCI Data Packet RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: BAP handle is NULL VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_RxProcLsPkt ( ptBtampHandle btampHandle, v_U8_t phy_link_handle, /* Used by BAP to indentify the WLAN assoc. 
(StaId) */ v_U16_t RxProtoType, /* Protocol Type from the frame received */ vos_pkt_t *vosRxLsBuff ) { VOS_STATUS vosStatus; ptBtampContext pBtampCtx = (ptBtampContext) btampHandle; WLANBAP_8023HeaderType w8023Header; v_SIZE_t HeaderLen = sizeof(w8023Header); /*------------------------------------------------------------------------ Sanity check params ------------------------------------------------------------------------*/ if ( NULL == pBtampCtx) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Invalid BAP handle value in %s", __func__); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In %s Received RxProtoType=%x", __func__,RxProtoType); vos_pkt_extract_data(vosRxLsBuff,0,(v_VOID_t*)&w8023Header,&HeaderLen); if ( !(vos_mem_compare( w8023Header.vDA, pBtampCtx->self_mac_addr, VOS_MAC_ADDR_SIZE) && vos_mem_compare( w8023Header.vSA, pBtampCtx->peer_mac_addr, VOS_MAC_ADDR_SIZE))) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "MAC address mismatch in %s", __func__); return VOS_STATUS_E_FAULT; } /*Free the vos packet*/ vosStatus = vos_pkt_return_packet( vosRxLsBuff ); if ( VOS_STATUS_SUCCESS != vosStatus) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Failed to free VOS packet in %s", __func__); return VOS_STATUS_E_FAULT; } /* Reset Link Supervision timer */ if (RxProtoType == WLANTL_BT_AMP_TYPE_LS_REP) { pBtampCtx->lsReqPktPending = FALSE; pBtampCtx->retries = 0; if (pBtampCtx->bapLinkSupervisionTimerInterval) { /* Restart the LS timer */ WLANBAP_StopLinkSupervisionTimer(pBtampCtx); vosStatus = WLANBAP_StartLinkSupervisionTimer (pBtampCtx, pBtampCtx->bapLinkSupervisionTimerInterval * WLANBAP_BREDR_BASEBAND_SLOT_TIME); } } else if(RxProtoType == WLANTL_BT_AMP_TYPE_LS_REQ) { if (pBtampCtx->bapLinkSupervisionTimerInterval) { /* Restart the LS timer */ WLANBAP_StopLinkSupervisionTimer(pBtampCtx); vosStatus = WLANBAP_StartLinkSupervisionTimer (pBtampCtx, pBtampCtx->bapLinkSupervisionTimerInterval * 
WLANBAP_BREDR_BASEBAND_SLOT_TIME); } pBtampCtx->pPacket = pBtampCtx->lsRepPacket; // Handle LS rep frame vosStatus = WLANBAP_TxLinkSupervision( btampHandle, phy_link_handle, pBtampCtx->pPacket, WLANTL_BT_AMP_TYPE_LS_REP); } return vosStatus; } /* Tx callback function for LS packet */ static VOS_STATUS WLANBAP_TxLinkSupervisionCB ( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket, VOS_STATUS retStatus ) { VOS_STATUS vosStatus; ptBtampContext bapContext; /* Holds the btampContext value returned */ vos_pkt_t *pLSPacket; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "TxCompCB reached for LS Pkt"); /* Get the BT AMP context from the global */ bapContext = gpBtampCtx; if (!VOS_IS_STATUS_SUCCESS (retStatus)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "TxCompCB:Transmit status Failure"); } if ( pPacket == NULL ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "WLANBAP_TxCompCB bad input\n" ); return VOS_STATUS_E_FAILURE; } /* Return the packet & reallocate */ if( pPacket == bapContext->lsReqPacket ) { vosStatus = WLANBAP_AcquireLSPacket( bapContext, &pLSPacket,32, TRUE ); if( VOS_IS_STATUS_SUCCESS( vosStatus ) ) { bapContext->lsReqPacket = pLSPacket; } else { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:AcquireLSPacket failed\n",__func__); bapContext->lsReqPacket = NULL; return vosStatus; } } else { vosStatus = WLANBAP_AcquireLSPacket( bapContext, &pLSPacket,32, FALSE ); if( VOS_IS_STATUS_SUCCESS( vosStatus ) ) { bapContext->lsRepPacket = pLSPacket; } else { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:AcquireLSPacket failed\n",__func__); bapContext->lsRepPacket = NULL; return vosStatus; } } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO, "%s:Returned Vos Packet:%p\n", __func__, pPacket ); vos_pkt_return_packet( pPacket ); return (VOS_STATUS_SUCCESS ); } /*=========================================================================== FUNCTION WLANBAP_TxLinkSupervision DESCRIPTION This API will be called to process Link Supervision 
Request received PARAMETERS btampHandle: The BT-AMP PAL handle returned in WLANBAP_GetNewHndl. pucAC: Pointer to return the access category vosDataBuff: The data buffer containing the 802.3 frame to be translated to BT HCI Data Packet RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_INVAL: Input parameters are invalid VOS_STATUS_E_FAULT: BAP handle is NULL VOS_STATUS_SUCCESS: Everything is good :) SIDE EFFECTS ============================================================================*/ VOS_STATUS WLANBAP_TxLinkSupervision ( ptBtampHandle btampHandle, v_U8_t phy_link_handle, /* Used by BAP to indentify the WLAN assoc. (StaId) */ vos_pkt_t *pPacket, v_U16_t protoType ) { ptBtampContext pBtampCtx = (ptBtampContext)btampHandle; VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE; v_PVOID_t pvosGCtx; v_U8_t ucSTAId; /* The StaId (used by TL, PE, and HAL) */ v_PVOID_t pHddHdl; /* Handle to return BSL context in */ WLANTL_MetaInfoType metaInfo; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "In : %s protoType=%x", __func__,protoType); // Retrieve the VOSS context pvosGCtx = pBtampCtx->pvosGCtx; /* Lookup the StaId using the phy_link_handle and the BAP context */ vosStatus = WLANBAP_GetStaIdFromLinkCtx ( btampHandle, /* btampHandle value in */ phy_link_handle, /* phy_link_handle value in */ &ucSTAId, /* The StaId (used by TL, PE, and HAL) */ &pHddHdl); /* Handle to return BSL context */ if ( VOS_STATUS_SUCCESS != vosStatus ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Unable to retrieve STA Id from BAP context and phy_link_handle in WLANBAP_TxLinKSupervisionReq"); return VOS_STATUS_E_FAULT; } vos_mem_zero( &metaInfo, sizeof( WLANTL_MetaInfoType ) ); metaInfo.ucTID = 0x00 ; metaInfo.ucUP = 0x00; metaInfo.ucIsEapol = VOS_FALSE;//Notify TL that this is NOT an EAPOL frame metaInfo.ucDisableFrmXtl = VOS_FALSE; metaInfo.ucType = 0x00; pBtampCtx->metaInfo = metaInfo; vosStatus = WLANTL_TxBAPFrm( pvosGCtx, pPacket, &metaInfo, 
WLANBAP_TxLinkSupervisionCB ); if( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "Tx: Packet rejected by TL in WLANBAP_TxLinkSupervisionReq"); return vosStatus; } if(protoType == WLANTL_BT_AMP_TYPE_LS_REQ) { pBtampCtx->lsReqPktPending = TRUE; pBtampCtx->retries++; } if (pBtampCtx->bapLinkSupervisionTimerInterval) { /* Restart the LS timer */ WLANBAP_StopLinkSupervisionTimer(pBtampCtx); vosStatus = WLANBAP_StartLinkSupervisionTimer (pBtampCtx, pBtampCtx->bapLinkSupervisionTimerInterval * WLANBAP_BREDR_BASEBAND_SLOT_TIME); } if( !VOS_IS_STATUS_SUCCESS( vosStatus ) ) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "WLANBAP_TxLinkSupervisionReq failed to Start LinkSupervision Timer\n" ); return vosStatus; } return vosStatus; } /* WLANBAP_RxLinkSupervisionReq */
gpl-2.0
MikeC84/android_kernel_motorola_shamu
fs/minix/inode.c
1816
18601
/* * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Copyright (C) 1996 Gertjan van Wingerde * Minix V2 fs support. * * Modified for 680x0 by Andreas Schwab * Updated to filesystem version 3 by Daniel Aragones */ #include <linux/module.h> #include "minix.h" #include <linux/buffer_head.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/highuid.h> #include <linux/vfs.h> #include <linux/writeback.h> static int minix_write_inode(struct inode *inode, struct writeback_control *wbc); static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); static int minix_remount (struct super_block * sb, int * flags, char * data); static void minix_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); if (!inode->i_nlink) { inode->i_size = 0; minix_truncate(inode); } invalidate_inode_buffers(inode); clear_inode(inode); if (!inode->i_nlink) minix_free_inode(inode); } static void minix_put_super(struct super_block *sb) { int i; struct minix_sb_info *sbi = minix_sb(sb); if (!(sb->s_flags & MS_RDONLY)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ sbi->s_ms->s_state = sbi->s_mount_state; mark_buffer_dirty(sbi->s_sbh); } for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) brelse(sbi->s_zmap[i]); brelse (sbi->s_sbh); kfree(sbi->s_imap); sb->s_fs_info = NULL; kfree(sbi); } static struct kmem_cache * minix_inode_cachep; static struct inode *minix_alloc_inode(struct super_block *sb) { struct minix_inode_info *ei; ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void minix_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(minix_inode_cachep, minix_i(inode)); } static void minix_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, minix_i_callback); } static void init_once(void *foo) { struct 
minix_inode_info *ei = (struct minix_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { minix_inode_cachep = kmem_cache_create("minix_inode_cache", sizeof(struct minix_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (minix_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(minix_inode_cachep); } static const struct super_operations minix_sops = { .alloc_inode = minix_alloc_inode, .destroy_inode = minix_destroy_inode, .write_inode = minix_write_inode, .evict_inode = minix_evict_inode, .put_super = minix_put_super, .statfs = minix_statfs, .remount_fs = minix_remount, }; static int minix_remount (struct super_block * sb, int * flags, char * data) { struct minix_sb_info * sbi = minix_sb(sb); struct minix_super_block * ms; ms = sbi->s_ms; if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) return 0; if (*flags & MS_RDONLY) { if (ms->s_state & MINIX_VALID_FS || !(sbi->s_mount_state & MINIX_VALID_FS)) return 0; /* Mounting a rw partition read-only. */ if (sbi->s_version != MINIX_V3) ms->s_state = sbi->s_mount_state; mark_buffer_dirty(sbi->s_sbh); } else { /* Mount a partition which is read-only, read-write. 
*/ if (sbi->s_version != MINIX_V3) { sbi->s_mount_state = ms->s_state; ms->s_state &= ~MINIX_VALID_FS; } else { sbi->s_mount_state = MINIX_VALID_FS; } mark_buffer_dirty(sbi->s_sbh); if (!(sbi->s_mount_state & MINIX_VALID_FS)) printk("MINIX-fs warning: remounting unchecked fs, " "running fsck is recommended\n"); else if ((sbi->s_mount_state & MINIX_ERROR_FS)) printk("MINIX-fs warning: remounting fs with errors, " "running fsck is recommended\n"); } return 0; } static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; struct buffer_head **map; struct minix_super_block *ms; struct minix3_super_block *m3s = NULL; unsigned long i, block; struct inode *root_inode; struct minix_sb_info *sbi; int ret = -EINVAL; sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; s->s_fs_info = sbi; BUILD_BUG_ON(32 != sizeof (struct minix_inode)); BUILD_BUG_ON(64 != sizeof(struct minix2_inode)); if (!sb_set_blocksize(s, BLOCK_SIZE)) goto out_bad_hblock; if (!(bh = sb_bread(s, 1))) goto out_bad_sb; ms = (struct minix_super_block *) bh->b_data; sbi->s_ms = ms; sbi->s_sbh = bh; sbi->s_mount_state = ms->s_state; sbi->s_ninodes = ms->s_ninodes; sbi->s_nzones = ms->s_nzones; sbi->s_imap_blocks = ms->s_imap_blocks; sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; sbi->s_max_size = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; sbi->s_dirsize = 16; sbi->s_namelen = 14; s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX_SUPER_MAGIC2) { sbi->s_version = MINIX_V1; sbi->s_dirsize = 32; sbi->s_namelen = 30; s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC) { sbi->s_version = MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 16; sbi->s_namelen = 14; s->s_max_links = MINIX2_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC2) { sbi->s_version = 
MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 32; sbi->s_namelen = 30; s->s_max_links = MINIX2_LINK_MAX; } else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) { m3s = (struct minix3_super_block *) bh->b_data; s->s_magic = m3s->s_magic; sbi->s_imap_blocks = m3s->s_imap_blocks; sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; sbi->s_max_size = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; sbi->s_namelen = 60; sbi->s_version = MINIX_V3; sbi->s_mount_state = MINIX_VALID_FS; sb_set_blocksize(s, m3s->s_blocksize); s->s_max_links = MINIX2_LINK_MAX; } else goto out_no_fs; /* * Allocate the buffer map to keep the superblock small. */ if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) goto out_no_map; sbi->s_imap = &map[0]; sbi->s_zmap = &map[sbi->s_imap_blocks]; block=2; for (i=0 ; i < sbi->s_imap_blocks ; i++) { if (!(sbi->s_imap[i]=sb_bread(s, block))) goto out_no_bitmap; block++; } for (i=0 ; i < sbi->s_zmap_blocks ; i++) { if (!(sbi->s_zmap[i]=sb_bread(s, block))) goto out_no_bitmap; block++; } minix_set_bit(0,sbi->s_imap[0]->b_data); minix_set_bit(0,sbi->s_zmap[0]->b_data); /* Apparently minix can create filesystems that allocate more blocks for * the bitmaps than needed. We simply ignore that, but verify it didn't * create one with not enough blocks and bail out if so. */ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); if (sbi->s_imap_blocks < block) { printk("MINIX-fs: file system does not have enough " "imap blocks allocated. Refusing to mount\n"); goto out_no_bitmap; } block = minix_blocks_needed( (sbi->s_nzones - (sbi->s_firstdatazone + 1)), s->s_blocksize); if (sbi->s_zmap_blocks < block) { printk("MINIX-fs: file system does not have enough " "zmap blocks allocated. 
Refusing to mount.\n"); goto out_no_bitmap; } /* set up enough so that it can read an inode */ s->s_op = &minix_sops; root_inode = minix_iget(s, MINIX_ROOT_INO); if (IS_ERR(root_inode)) { ret = PTR_ERR(root_inode); goto out_no_root; } ret = -ENOMEM; s->s_root = d_make_root(root_inode); if (!s->s_root) goto out_no_root; if (!(s->s_flags & MS_RDONLY)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ ms->s_state &= ~MINIX_VALID_FS; mark_buffer_dirty(bh); } if (!(sbi->s_mount_state & MINIX_VALID_FS)) printk("MINIX-fs: mounting unchecked file system, " "running fsck is recommended\n"); else if (sbi->s_mount_state & MINIX_ERROR_FS) printk("MINIX-fs: mounting file system with errors, " "running fsck is recommended\n"); return 0; out_no_root: if (!silent) printk("MINIX-fs: get root inode failed\n"); goto out_freemap; out_no_bitmap: printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); out_freemap: for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) brelse(sbi->s_zmap[i]); kfree(sbi->s_imap); goto out_release; out_no_map: ret = -ENOMEM; if (!silent) printk("MINIX-fs: can't allocate map\n"); goto out_release; out_illegal_sb: if (!silent) printk("MINIX-fs: bad superblock\n"); goto out_release; out_no_fs: if (!silent) printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 " "on device %s.\n", s->s_id); out_release: brelse(bh); goto out; out_bad_hblock: printk("MINIX-fs: blocksize too small for device\n"); goto out; out_bad_sb: printk("MINIX-fs: unable to read superblock\n"); out: s->s_fs_info = NULL; kfree(sbi); return ret; } static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct minix_sb_info *sbi = minix_sb(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; buf->f_bfree = 
minix_count_free_blocks(sb); buf->f_bavail = buf->f_bfree; buf->f_files = sbi->s_ninodes; buf->f_ffree = minix_count_free_inodes(sb); buf->f_namelen = sbi->s_namelen; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static int minix_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { if (INODE_VERSION(inode) == MINIX_V1) return V1_minix_get_block(inode, block, bh_result, create); else return V2_minix_get_block(inode, block, bh_result, create); } static int minix_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, minix_get_block, wbc); } static int minix_readpage(struct file *file, struct page *page) { return block_read_full_page(page,minix_get_block); } int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, minix_get_block); } static void minix_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, to, inode->i_size); minix_truncate(inode); } } static int minix_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, minix_get_block); if (unlikely(ret)) minix_write_failed(mapping, pos + len); return ret; } static sector_t minix_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,minix_get_block); } static const struct address_space_operations minix_aops = { .readpage = minix_readpage, .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, .bmap = minix_bmap }; static const struct inode_operations minix_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .getattr = minix_getattr, }; void 
minix_set_inode(struct inode *inode, dev_t rdev) { if (S_ISREG(inode->i_mode)) { inode->i_op = &minix_file_inode_operations; inode->i_fop = &minix_file_operations; inode->i_mapping->a_ops = &minix_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &minix_dir_inode_operations; inode->i_fop = &minix_dir_operations; inode->i_mapping->a_ops = &minix_aops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &minix_symlink_inode_operations; inode->i_mapping->a_ops = &minix_aops; } else init_special_inode(inode, inode->i_mode, rdev); } /* * The minix V1 function to read an inode. */ static struct inode *V1_minix_iget(struct inode *inode) { struct buffer_head * bh; struct minix_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { iget_failed(inode); return ERR_PTR(-EIO); } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); set_nlink(inode, raw_inode->i_nlinks); inode->i_size = raw_inode->i_size; inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time; inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = 0; for (i = 0; i < 9; i++) minix_inode->u.i1_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); unlock_new_inode(inode); return inode; } /* * The minix V2 function to read an inode. 
*/ static struct inode *V2_minix_iget(struct inode *inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { iget_failed(inode); return ERR_PTR(-EIO); } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); set_nlink(inode, raw_inode->i_nlinks); inode->i_size = raw_inode->i_size; inode->i_mtime.tv_sec = raw_inode->i_mtime; inode->i_atime.tv_sec = raw_inode->i_atime; inode->i_ctime.tv_sec = raw_inode->i_ctime; inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = 0; for (i = 0; i < 10; i++) minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); unlock_new_inode(inode); return inode; } /* * The global function to read an inode. */ struct inode *minix_iget(struct super_block *sb, unsigned long ino) { struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; if (INODE_VERSION(inode) == MINIX_V1) return V1_minix_iget(inode); else return V2_minix_iget(inode); } /* * The minix V1 function to synchronize an inode. 
*/ static struct buffer_head * V1_minix_update_inode(struct inode * inode) { struct buffer_head * bh; struct minix_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) return NULL; raw_inode->i_mode = inode->i_mode; raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); raw_inode->i_nlinks = inode->i_nlink; raw_inode->i_size = inode->i_size; raw_inode->i_time = inode->i_mtime.tv_sec; if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev); else for (i = 0; i < 9; i++) raw_inode->i_zone[i] = minix_inode->u.i1_data[i]; mark_buffer_dirty(bh); return bh; } /* * The minix V2 function to synchronize an inode. */ static struct buffer_head * V2_minix_update_inode(struct inode * inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) return NULL; raw_inode->i_mode = inode->i_mode; raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); raw_inode->i_nlinks = inode->i_nlink; raw_inode->i_size = inode->i_size; raw_inode->i_mtime = inode->i_mtime.tv_sec; raw_inode->i_atime = inode->i_atime.tv_sec; raw_inode->i_ctime = inode->i_ctime.tv_sec; if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev); else for (i = 0; i < 10; i++) raw_inode->i_zone[i] = minix_inode->u.i2_data[i]; mark_buffer_dirty(bh); return bh; } static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) { int err = 0; struct buffer_head *bh; if (INODE_VERSION(inode) == MINIX_V1) bh = V1_minix_update_inode(inode); else bh = V2_minix_update_inode(inode); if (!bh) return -EIO; if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) { 
sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk("IO error syncing minix inode [%s:%08lx]\n", inode->i_sb->s_id, inode->i_ino); err = -EIO; } } brelse (bh); return err; } int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct super_block *sb = dentry->d_sb; generic_fillattr(dentry->d_inode, stat); if (INODE_VERSION(dentry->d_inode) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); else stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb); stat->blksize = sb->s_blocksize; return 0; } /* * The function that is called for file truncation. */ void minix_truncate(struct inode * inode) { if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; if (INODE_VERSION(inode) == MINIX_V1) V1_minix_truncate(inode); else V2_minix_truncate(inode); } static struct dentry *minix_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super); } static struct file_system_type minix_fs_type = { .owner = THIS_MODULE, .name = "minix", .mount = minix_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("minix"); static int __init init_minix_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&minix_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_minix_fs(void) { unregister_filesystem(&minix_fs_type); destroy_inodecache(); } module_init(init_minix_fs) module_exit(exit_minix_fs) MODULE_LICENSE("GPL");
gpl-2.0
librae8226/linux-3.0
arch/powerpc/sysdev/mpc8xxx_gpio.c
2584
10204
/* * GPIOs on MPC512x/8349/8572/8610 and compatible * * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/irq.h> #define MPC8XXX_GPIO_PINS 32 #define GPIO_DIR 0x00 #define GPIO_ODR 0x04 #define GPIO_DAT 0x08 #define GPIO_IER 0x0c #define GPIO_IMR 0x10 #define GPIO_ICR 0x14 #define GPIO_ICR2 0x18 struct mpc8xxx_gpio_chip { struct of_mm_gpio_chip mm_gc; spinlock_t lock; /* * shadowed data register to be able to clear/set output pins in * open drain mode safely */ u32 data; struct irq_host *irq; void *of_dev_id_data; }; static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) { return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio); } static inline struct mpc8xxx_gpio_chip * to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) { return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); } static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); } /* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs * defined as output cannot be determined by reading GPDAT register, * so we use shadow data register instead. The status of input pins * is determined by reading GPDAT register. 
*/ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) { u32 val; struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); } static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); } static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); if (val) mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio); else mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio); out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; mpc8xxx_gpio_set(gc, gpio, val); spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); if (mpc8xxx_gc->irq && 
offset < MPC8XXX_GPIO_PINS) return irq_create_mapping(mpc8xxx_gc->irq, offset); else return -ENXIO; } static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned int mask; mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); if (mask) generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 32 - ffs(mask))); } static void mpc8xxx_irq_unmask(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_mask(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_ack(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); } static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_ICR, 
mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: return -EINVAL; } return 0; } static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long gpio = irqd_to_hwirq(d); void __iomem *reg; unsigned int shift; unsigned long flags; if (gpio < 16) { reg = mm->regs + GPIO_ICR; shift = (15 - gpio) * 2; } else { reg = mm->regs + GPIO_ICR2; shift = (15 - (gpio % 16)) * 2; } switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_LEVEL_LOW: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 2 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 1 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(reg, 3 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: return -EINVAL; } return 0; } static struct irq_chip mpc8xxx_irq_chip = { .name = "mpc8xxx-gpio", .irq_unmask = mpc8xxx_irq_unmask, .irq_mask = mpc8xxx_irq_mask, .irq_ack = mpc8xxx_irq_ack, .irq_set_type = mpc8xxx_irq_set_type, }; static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; if (mpc8xxx_gc->of_dev_id_data) mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); irq_set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { /* interrupt sense values coming from 
the device tree equal either * EDGE_FALLING or EDGE_BOTH */ *out_hwirq = intspec[0]; *out_flags = intspec[1]; return 0; } static struct irq_host_ops mpc8xxx_gpio_irq_ops = { .map = mpc8xxx_gpio_irq_map, .xlate = mpc8xxx_gpio_irq_xlate, }; static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { { .compatible = "fsl,mpc8349-gpio", }, { .compatible = "fsl,mpc8572-gpio", }, { .compatible = "fsl,mpc8610-gpio", }, { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, }, { .compatible = "fsl,qoriq-gpio", }, {} }; static void __init mpc8xxx_add_controller(struct device_node *np) { struct mpc8xxx_gpio_chip *mpc8xxx_gc; struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; const struct of_device_id *id; unsigned hwirq; int ret; mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL); if (!mpc8xxx_gc) { ret = -ENOMEM; goto err; } spin_lock_init(&mpc8xxx_gc->lock); mm_gc = &mpc8xxx_gc->mm_gc; gc = &mm_gc->gc; mm_gc->save_regs = mpc8xxx_gpio_save_regs; gc->ngpio = MPC8XXX_GPIO_PINS; gc->direction_input = mpc8xxx_gpio_dir_in; gc->direction_output = mpc8xxx_gpio_dir_out; if (of_device_is_compatible(np, "fsl,mpc8572-gpio")) gc->get = mpc8572_gpio_get; else gc->get = mpc8xxx_gpio_get; gc->set = mpc8xxx_gpio_set; gc->to_irq = mpc8xxx_gpio_to_irq; ret = of_mm_gpiochip_add(np, mm_gc); if (ret) goto err; hwirq = irq_of_parse_and_map(np, 0); if (hwirq == NO_IRQ) goto skip_irq; mpc8xxx_gc->irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS, &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS); if (!mpc8xxx_gc->irq) goto skip_irq; id = of_match_node(mpc8xxx_gpio_ids, np); if (id) mpc8xxx_gc->of_dev_id_data = id->data; mpc8xxx_gc->irq->host_data = mpc8xxx_gc; /* ack and mask all irqs */ out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); out_be32(mm_gc->regs + GPIO_IMR, 0); irq_set_handler_data(hwirq, mpc8xxx_gc); irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); skip_irq: return; err: pr_err("%s: registration failed with status %d\n", np->full_name, ret); 
kfree(mpc8xxx_gc); return; } static int __init mpc8xxx_add_gpiochips(void) { struct device_node *np; for_each_matching_node(np, mpc8xxx_gpio_ids) mpc8xxx_add_controller(np); return 0; } arch_initcall(mpc8xxx_add_gpiochips);
gpl-2.0
Ziyann/android_kernel_samsung_espresso
arch/tile/kernel/proc.c
2840
3639
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/smp.h> #include <linux/seq_file.h> #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/timex.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/sysctl.h> #include <linux/hardirq.h> #include <linux/mman.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/homecache.h> #include <asm/hardwall.h> #include <arch/chip.h> /* * Support /proc/cpuinfo */ #define cpu_to_ptr(n) ((void *)((long)(n)+1)) #define ptr_to_cpu(p) ((long)(p) - 1) static int show_cpuinfo(struct seq_file *m, void *v) { int n = ptr_to_cpu(v); if (n == 0) { char buf[NR_CPUS*5]; cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); seq_printf(m, "cpu count\t: %d\n", num_online_cpus()); seq_printf(m, "cpu list\t: %s\n", buf); seq_printf(m, "model name\t: %s\n", chip_model); seq_printf(m, "flags\t\t:\n"); /* nothing for now */ seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n", get_clock_rate() / 1000000, (get_clock_rate() % 1000000)); seq_printf(m, "bogomips\t: %lu.%02lu\n\n", loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100); } #ifdef CONFIG_SMP if (!cpu_online(n)) return 0; #endif seq_printf(m, "processor\t: %d\n", n); /* Print only num_online_cpus() blank lines total. */ if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) seq_printf(m, "\n"); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < nr_cpu_ids ? 
cpu_to_ptr(*pos) : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, }; /* * Support /proc/tile directory */ static int __init proc_tile_init(void) { struct proc_dir_entry *root = proc_mkdir("tile", NULL); if (root == NULL) return 0; proc_tile_hardwall_init(root); return 0; } arch_initcall(proc_tile_init); /* * Support /proc/sys/tile directory */ #ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */ static ctl_table unaligned_subtable[] = { { .procname = "enabled", .data = &unaligned_fixup, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec }, { .procname = "printk", .data = &unaligned_printk, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec }, { .procname = "count", .data = &unaligned_fixup_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec }, {} }; static ctl_table unaligned_table[] = { { .procname = "unaligned_fixup", .mode = 0555, .child = unaligned_subtable }, {} }; #endif static struct ctl_path tile_path[] = { { .procname = "tile" }, { } }; static int __init proc_sys_tile_init(void) { #ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */ register_sysctl_paths(tile_path, unaligned_table); #endif return 0; } arch_initcall(proc_sys_tile_init);
gpl-2.0
voidz777/omap
drivers/char/viotape.c
3352
26600
/* -*- linux-c -*- * drivers/char/viotape.c * * iSeries Virtual Tape * * Authors: Dave Boutcher <boutcher@us.ibm.com> * Ryan Arnold <ryanarn@us.ibm.com> * Colin Devilbiss <devilbis@us.ibm.com> * Stephen Rothwell * * (C) Copyright 2000-2004 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) anyu later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * This routine provides access to tape drives owned and managed by an OS/400 * partition running on the same box as this Linux partition. * * All tape operations are performed by sending messages back and forth to * the OS/400 partition. 
The format of the messages is defined in * iseries/vio.h */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/mtio.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/major.h> #include <linux/completion.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <asm/firmware.h> #include <asm/vio.h> #include <asm/iseries/vio.h> #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/hv_call_event.h> #include <asm/iseries/hv_lp_config.h> #define VIOTAPE_VERSION "1.2" #define VIOTAPE_MAXREQ 1 #define VIOTAPE_KERN_WARN KERN_WARNING "viotape: " #define VIOTAPE_KERN_INFO KERN_INFO "viotape: " static DEFINE_MUTEX(proc_viotape_mutex); static int viotape_numdev; /* * The minor number follows the conventions of the SCSI tape drives. The * rewind and mode are encoded in the minor #. 
We use this struct to break * them out */ struct viot_devinfo_struct { int devno; int mode; int rewind; }; #define VIOTAPOP_RESET 0 #define VIOTAPOP_FSF 1 #define VIOTAPOP_BSF 2 #define VIOTAPOP_FSR 3 #define VIOTAPOP_BSR 4 #define VIOTAPOP_WEOF 5 #define VIOTAPOP_REW 6 #define VIOTAPOP_NOP 7 #define VIOTAPOP_EOM 8 #define VIOTAPOP_ERASE 9 #define VIOTAPOP_SETBLK 10 #define VIOTAPOP_SETDENSITY 11 #define VIOTAPOP_SETPOS 12 #define VIOTAPOP_GETPOS 13 #define VIOTAPOP_SETPART 14 #define VIOTAPOP_UNLOAD 15 enum viotaperc { viotape_InvalidRange = 0x0601, viotape_InvalidToken = 0x0602, viotape_DMAError = 0x0603, viotape_UseError = 0x0604, viotape_ReleaseError = 0x0605, viotape_InvalidTape = 0x0606, viotape_InvalidOp = 0x0607, viotape_TapeErr = 0x0608, viotape_AllocTimedOut = 0x0640, viotape_BOTEnc = 0x0641, viotape_BlankTape = 0x0642, viotape_BufferEmpty = 0x0643, viotape_CleanCartFound = 0x0644, viotape_CmdNotAllowed = 0x0645, viotape_CmdNotSupported = 0x0646, viotape_DataCheck = 0x0647, viotape_DecompressErr = 0x0648, viotape_DeviceTimeout = 0x0649, viotape_DeviceUnavail = 0x064a, viotape_DeviceBusy = 0x064b, viotape_EndOfMedia = 0x064c, viotape_EndOfTape = 0x064d, viotape_EquipCheck = 0x064e, viotape_InsufficientRs = 0x064f, viotape_InvalidLogBlk = 0x0650, viotape_LengthError = 0x0651, viotape_LibDoorOpen = 0x0652, viotape_LoadFailure = 0x0653, viotape_NotCapable = 0x0654, viotape_NotOperational = 0x0655, viotape_NotReady = 0x0656, viotape_OpCancelled = 0x0657, viotape_PhyLinkErr = 0x0658, viotape_RdyNotBOT = 0x0659, viotape_TapeMark = 0x065a, viotape_WriteProt = 0x065b }; static const struct vio_error_entry viotape_err_table[] = { { viotape_InvalidRange, EIO, "Internal error" }, { viotape_InvalidToken, EIO, "Internal error" }, { viotape_DMAError, EIO, "DMA error" }, { viotape_UseError, EIO, "Internal error" }, { viotape_ReleaseError, EIO, "Internal error" }, { viotape_InvalidTape, EIO, "Invalid tape device" }, { viotape_InvalidOp, EIO, "Invalid operation" }, { 
viotape_TapeErr, EIO, "Tape error" }, { viotape_AllocTimedOut, EBUSY, "Allocate timed out" }, { viotape_BOTEnc, EIO, "Beginning of tape encountered" }, { viotape_BlankTape, EIO, "Blank tape" }, { viotape_BufferEmpty, EIO, "Buffer empty" }, { viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" }, { viotape_CmdNotAllowed, EIO, "Command not allowed" }, { viotape_CmdNotSupported, EIO, "Command not supported" }, { viotape_DataCheck, EIO, "Data check" }, { viotape_DecompressErr, EIO, "Decompression error" }, { viotape_DeviceTimeout, EBUSY, "Device timeout" }, { viotape_DeviceUnavail, EIO, "Device unavailable" }, { viotape_DeviceBusy, EBUSY, "Device busy" }, { viotape_EndOfMedia, ENOSPC, "End of media" }, { viotape_EndOfTape, ENOSPC, "End of tape" }, { viotape_EquipCheck, EIO, "Equipment check" }, { viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" }, { viotape_InvalidLogBlk, EIO, "Invalid logical block location" }, { viotape_LengthError, EOVERFLOW, "Length error" }, { viotape_LibDoorOpen, EBUSY, "Door open" }, { viotape_LoadFailure, ENOMEDIUM, "Load failure" }, { viotape_NotCapable, EIO, "Not capable" }, { viotape_NotOperational, EIO, "Not operational" }, { viotape_NotReady, EIO, "Not ready" }, { viotape_OpCancelled, EIO, "Operation cancelled" }, { viotape_PhyLinkErr, EIO, "Physical link error" }, { viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" }, { viotape_TapeMark, EIO, "Tape mark" }, { viotape_WriteProt, EROFS, "Write protection error" }, { 0, 0, NULL }, }; /* Maximum number of tapes we support */ #define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES #define MAX_PARTITIONS 4 /* defines for current tape state */ #define VIOT_IDLE 0 #define VIOT_READING 1 #define VIOT_WRITING 2 /* Our info on the tapes */ static struct { const char *rsrcname; const char *type; const char *model; } viotape_unitinfo[VIOTAPE_MAX_TAPE]; static struct mtget viomtget[VIOTAPE_MAX_TAPE]; static struct class *tape_class; static struct device 
*tape_device[VIOTAPE_MAX_TAPE]; /* * maintain the current state of each tape (and partition) * so that we know when to write EOF marks. */ static struct { unsigned char cur_part; unsigned char part_stat_rwi[MAX_PARTITIONS]; } state[VIOTAPE_MAX_TAPE]; /* We single-thread */ static struct semaphore reqSem; /* * When we send a request, we use this struct to get the response back * from the interrupt handler */ struct op_struct { void *buffer; dma_addr_t dmaaddr; size_t count; int rc; int non_blocking; struct completion com; struct device *dev; struct op_struct *next; }; static spinlock_t op_struct_list_lock; static struct op_struct *op_struct_list; /* forward declaration to resolve interdependence */ static int chg_state(int index, unsigned char new_state, struct file *file); /* procfs support */ static int proc_viotape_show(struct seq_file *m, void *v) { int i; seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n"); for (i = 0; i < viotape_numdev; i++) { seq_printf(m, "viotape device %d is iSeries resource %10.10s" "type %4.4s, model %3.3s\n", i, viotape_unitinfo[i].rsrcname, viotape_unitinfo[i].type, viotape_unitinfo[i].model); } return 0; } static int proc_viotape_open(struct inode *inode, struct file *file) { return single_open(file, proc_viotape_show, NULL); } static const struct file_operations proc_viotape_operations = { .owner = THIS_MODULE, .open = proc_viotape_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* Decode the device minor number into its parts */ void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi) { devi->devno = iminor(ino) & 0x1F; devi->mode = (iminor(ino) & 0x60) >> 5; /* if bit is set in the minor, do _not_ rewind automatically */ devi->rewind = (iminor(ino) & 0x80) == 0; } /* This is called only from the exit and init paths, so no need for locking */ static void clear_op_struct_pool(void) { while (op_struct_list) { struct op_struct *toFree = op_struct_list; op_struct_list = 
op_struct_list->next; kfree(toFree); } } /* Likewise, this is only called from the init path */ static int add_op_structs(int structs) { int i; for (i = 0; i < structs; ++i) { struct op_struct *new_struct = kmalloc(sizeof(*new_struct), GFP_KERNEL); if (!new_struct) { clear_op_struct_pool(); return -ENOMEM; } new_struct->next = op_struct_list; op_struct_list = new_struct; } return 0; } /* Allocate an op structure from our pool */ static struct op_struct *get_op_struct(void) { struct op_struct *retval; unsigned long flags; spin_lock_irqsave(&op_struct_list_lock, flags); retval = op_struct_list; if (retval) op_struct_list = retval->next; spin_unlock_irqrestore(&op_struct_list_lock, flags); if (retval) { memset(retval, 0, sizeof(*retval)); init_completion(&retval->com); } return retval; } /* Return an op structure to our pool */ static void free_op_struct(struct op_struct *op_struct) { unsigned long flags; spin_lock_irqsave(&op_struct_list_lock, flags); op_struct->next = op_struct_list; op_struct_list = op_struct; spin_unlock_irqrestore(&op_struct_list_lock, flags); } /* Map our tape return codes to errno values */ int tape_rc_to_errno(int tape_rc, char *operation, int tapeno) { const struct vio_error_entry *err; if (tape_rc == 0) return 0; err = vio_lookup_rc(viotape_err_table, tape_rc); printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n", operation, tape_rc, tapeno, viotape_unitinfo[tapeno].rsrcname, err->msg); return -err->errno; } /* Write */ static ssize_t viotap_write(struct file *file, const char *buf, size_t count, loff_t * ppos) { HvLpEvent_Rc hvrc; unsigned short flags = file->f_flags; int noblock = ((flags & O_NONBLOCK) != 0); ssize_t ret; struct viot_devinfo_struct devi; struct op_struct *op = get_op_struct(); if (op == NULL) return -ENOMEM; get_dev_info(file->f_path.dentry->d_inode, &devi); /* * We need to make sure we can send a request. We use * a semaphore to keep track of # requests in use. 
If * we are non-blocking, make sure we don't block on the * semaphore */ if (noblock) { if (down_trylock(&reqSem)) { ret = -EWOULDBLOCK; goto free_op; } } else down(&reqSem); /* Allocate a DMA buffer */ op->dev = tape_device[devi.devno]; op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr, GFP_ATOMIC); if (op->buffer == NULL) { printk(VIOTAPE_KERN_WARN "error allocating dma buffer for len %ld\n", count); ret = -EFAULT; goto up_sem; } /* Copy the data into the buffer */ if (copy_from_user(op->buffer, buf, count)) { printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n"); ret = -EFAULT; goto free_dma; } op->non_blocking = noblock; init_completion(&op->com); op->count = count; hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapewrite, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0); if (hvrc != HvLpEvent_Rc_Good) { printk(VIOTAPE_KERN_WARN "hv error on op %d\n", (int)hvrc); ret = -EIO; goto free_dma; } if (noblock) return count; wait_for_completion(&op->com); if (op->rc) ret = tape_rc_to_errno(op->rc, "write", devi.devno); else { chg_state(devi.devno, VIOT_WRITING, file); ret = op->count; } free_dma: dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr); up_sem: up(&reqSem); free_op: free_op_struct(op); return ret; } /* read */ static ssize_t viotap_read(struct file *file, char *buf, size_t count, loff_t *ptr) { HvLpEvent_Rc hvrc; unsigned short flags = file->f_flags; struct op_struct *op = get_op_struct(); int noblock = ((flags & O_NONBLOCK) != 0); ssize_t ret; struct viot_devinfo_struct devi; if (op == NULL) return -ENOMEM; get_dev_info(file->f_path.dentry->d_inode, &devi); /* * We need to make sure we can send a request. We use * a semaphore to keep track of # requests in use. 
If * we are non-blocking, make sure we don't block on the * semaphore */ if (noblock) { if (down_trylock(&reqSem)) { ret = -EWOULDBLOCK; goto free_op; } } else down(&reqSem); chg_state(devi.devno, VIOT_READING, file); /* Allocate a DMA buffer */ op->dev = tape_device[devi.devno]; op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr, GFP_ATOMIC); if (op->buffer == NULL) { ret = -EFAULT; goto up_sem; } op->count = count; init_completion(&op->com); hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotaperead, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0); if (hvrc != HvLpEvent_Rc_Good) { printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n", (int)hvrc); ret = -EIO; goto free_dma; } wait_for_completion(&op->com); if (op->rc) ret = tape_rc_to_errno(op->rc, "read", devi.devno); else { ret = op->count; if (ret && copy_to_user(buf, op->buffer, ret)) { printk(VIOTAPE_KERN_WARN "error on copy_to_user\n"); ret = -EFAULT; } } free_dma: dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr); up_sem: up(&reqSem); free_op: free_op_struct(op); return ret; } /* ioctl */ static int viotap_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { HvLpEvent_Rc hvrc; int ret; struct viot_devinfo_struct devi; struct mtop mtc; u32 myOp; struct op_struct *op = get_op_struct(); if (op == NULL) return -ENOMEM; get_dev_info(file->f_path.dentry->d_inode, &devi); down(&reqSem); ret = -EINVAL; switch (cmd) { case MTIOCTOP: ret = -EFAULT; /* * inode is null if and only if we (the kernel) * made the request */ if (inode == NULL) memcpy(&mtc, (void *) arg, sizeof(struct mtop)); else if (copy_from_user((char *)&mtc, (char *)arg, sizeof(struct mtop))) goto free_op; ret = -EIO; switch (mtc.mt_op) { case MTRESET: myOp = VIOTAPOP_RESET; 
break; case MTFSF: myOp = VIOTAPOP_FSF; break; case MTBSF: myOp = VIOTAPOP_BSF; break; case MTFSR: myOp = VIOTAPOP_FSR; break; case MTBSR: myOp = VIOTAPOP_BSR; break; case MTWEOF: myOp = VIOTAPOP_WEOF; break; case MTREW: myOp = VIOTAPOP_REW; break; case MTNOP: myOp = VIOTAPOP_NOP; break; case MTEOM: myOp = VIOTAPOP_EOM; break; case MTERASE: myOp = VIOTAPOP_ERASE; break; case MTSETBLK: myOp = VIOTAPOP_SETBLK; break; case MTSETDENSITY: myOp = VIOTAPOP_SETDENSITY; break; case MTTELL: myOp = VIOTAPOP_GETPOS; break; case MTSEEK: myOp = VIOTAPOP_SETPOS; break; case MTSETPART: myOp = VIOTAPOP_SETPART; break; case MTOFFL: myOp = VIOTAPOP_UNLOAD; break; default: printk(VIOTAPE_KERN_WARN "MTIOCTOP called " "with invalid op 0x%x\n", mtc.mt_op); goto free_op; } /* * if we moved the head, we are no longer * reading or writing */ switch (mtc.mt_op) { case MTFSF: case MTBSF: case MTFSR: case MTBSR: case MTTELL: case MTSEEK: case MTREW: chg_state(devi.devno, VIOT_IDLE, file); } init_completion(&op->com); hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapeop, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48), 0, (((u64)myOp) << 32) | mtc.mt_count, 0); if (hvrc != HvLpEvent_Rc_Good) { printk(VIOTAPE_KERN_WARN "hv error on op %d\n", (int)hvrc); goto free_op; } wait_for_completion(&op->com); ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno); goto free_op; case MTIOCGET: ret = -EIO; init_completion(&op->com); hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapegetstatus, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48), 0, 0, 0); if (hvrc != HvLpEvent_Rc_Good) { 
printk(VIOTAPE_KERN_WARN "hv error on op %d\n", (int)hvrc); goto free_op; } wait_for_completion(&op->com); /* Operation is complete - grab the error code */ ret = tape_rc_to_errno(op->rc, "get status", devi.devno); free_op_struct(op); up(&reqSem); if ((ret == 0) && copy_to_user((void *)arg, &viomtget[devi.devno], sizeof(viomtget[0]))) ret = -EFAULT; return ret; case MTIOCPOS: printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n"); break; default: printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n", cmd); break; } free_op: free_op_struct(op); up(&reqSem); return ret; } static long viotap_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long rc; mutex_lock(&proc_viotape_mutex); rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); mutex_unlock(&proc_viotape_mutex); return rc; } static int viotap_open(struct inode *inode, struct file *file) { HvLpEvent_Rc hvrc; struct viot_devinfo_struct devi; int ret; struct op_struct *op = get_op_struct(); if (op == NULL) return -ENOMEM; mutex_lock(&proc_viotape_mutex); get_dev_info(file->f_path.dentry->d_inode, &devi); /* Note: We currently only support one mode! 
*/ if ((devi.devno >= viotape_numdev) || (devi.mode)) { ret = -ENODEV; goto free_op; } init_completion(&op->com); hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapeopen, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48), 0, 0, 0); if (hvrc != 0) { printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n", (int) hvrc); ret = -EIO; goto free_op; } wait_for_completion(&op->com); ret = tape_rc_to_errno(op->rc, "open", devi.devno); free_op: free_op_struct(op); mutex_unlock(&proc_viotape_mutex); return ret; } static int viotap_release(struct inode *inode, struct file *file) { HvLpEvent_Rc hvrc; struct viot_devinfo_struct devi; int ret = 0; struct op_struct *op = get_op_struct(); if (op == NULL) return -ENOMEM; init_completion(&op->com); get_dev_info(file->f_path.dentry->d_inode, &devi); if (devi.devno >= viotape_numdev) { ret = -ENODEV; goto free_op; } chg_state(devi.devno, VIOT_IDLE, file); if (devi.rewind) { hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapeop, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48), 0, ((u64)VIOTAPOP_REW) << 32, 0); wait_for_completion(&op->com); tape_rc_to_errno(op->rc, "rewind", devi.devno); } hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, viomajorsubtype_tape | viotapeclose, HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)op, VIOVERSION << 16, ((u64)devi.devno << 48), 0, 0, 0); if (hvrc != 0) { printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n", (int) hvrc); ret = -EIO; goto free_op; } 
wait_for_completion(&op->com); if (op->rc) printk(VIOTAPE_KERN_WARN "close failed\n"); free_op: free_op_struct(op); return ret; } const struct file_operations viotap_fops = { .owner = THIS_MODULE, .read = viotap_read, .write = viotap_write, .unlocked_ioctl = viotap_unlocked_ioctl, .open = viotap_open, .release = viotap_release, .llseek = noop_llseek, }; /* Handle interrupt events for tape */ static void vioHandleTapeEvent(struct HvLpEvent *event) { int tapeminor; struct op_struct *op; struct viotapelpevent *tevent = (struct viotapelpevent *)event; if (event == NULL) { /* Notification that a partition went away! */ if (!viopath_isactive(viopath_hostLp)) { /* TODO! Clean up */ } return; } tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK; op = (struct op_struct *)event->xCorrelationToken; switch (tapeminor) { case viotapeopen: case viotapeclose: op->rc = tevent->sub_type_result; complete(&op->com); break; case viotaperead: op->rc = tevent->sub_type_result; op->count = tevent->len; complete(&op->com); break; case viotapewrite: if (op->non_blocking) { dma_free_coherent(op->dev, op->count, op->buffer, op->dmaaddr); free_op_struct(op); up(&reqSem); } else { op->rc = tevent->sub_type_result; op->count = tevent->len; complete(&op->com); } break; case viotapeop: case viotapegetpos: case viotapesetpos: case viotapegetstatus: if (op) { op->count = tevent->u.op.count; op->rc = tevent->sub_type_result; if (!op->non_blocking) complete(&op->com); } break; default: printk(VIOTAPE_KERN_WARN "weird ack\n"); } } static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id) { int i = vdev->unit_address; int j; struct device_node *node = vdev->dev.of_node; if (i >= VIOTAPE_MAX_TAPE) return -ENODEV; if (!node) return -ENODEV; if (i >= viotape_numdev) viotape_numdev = i + 1; tape_device[i] = &vdev->dev; viotape_unitinfo[i].rsrcname = of_get_property(node, "linux,vio_rsrcname", NULL); viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type", NULL); 
viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model", NULL); state[i].cur_part = 0; for (j = 0; j < MAX_PARTITIONS; ++j) state[i].part_stat_rwi[j] = VIOT_IDLE; device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL, "iseries!vt%d", i); device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL, "iseries!nvt%d", i); printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries " "resource %10.10s type %4.4s, model %3.3s\n", i, viotape_unitinfo[i].rsrcname, viotape_unitinfo[i].type, viotape_unitinfo[i].model); return 0; } static int viotape_remove(struct vio_dev *vdev) { int i = vdev->unit_address; device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80)); device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i)); return 0; } /** * viotape_device_table: Used by vio.c to match devices that we * support. */ static struct vio_device_id viotape_device_table[] __devinitdata = { { "byte", "IBM,iSeries-viotape" }, { "", "" } }; MODULE_DEVICE_TABLE(vio, viotape_device_table); static struct vio_driver viotape_driver = { .id_table = viotape_device_table, .probe = viotape_probe, .remove = viotape_remove, .driver = { .name = "viotape", .owner = THIS_MODULE, } }; int __init viotap_init(void) { int ret; if (!firmware_has_feature(FW_FEATURE_ISERIES)) return -ENODEV; op_struct_list = NULL; if ((ret = add_op_structs(VIOTAPE_MAXREQ)) < 0) { printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n"); return ret; } spin_lock_init(&op_struct_list_lock); sema_init(&reqSem, VIOTAPE_MAXREQ); if (viopath_hostLp == HvLpIndexInvalid) { vio_set_hostlp(); if (viopath_hostLp == HvLpIndexInvalid) { ret = -ENODEV; goto clear_op; } } ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2); if (ret) { printk(VIOTAPE_KERN_WARN "error on viopath_open to hostlp %d\n", ret); ret = -EIO; goto clear_op; } printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION ", hosting partition %d\n", viopath_hostLp); vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent); ret = 
register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops); if (ret < 0) { printk(VIOTAPE_KERN_WARN "Error registering viotape device\n"); goto clear_handler; } tape_class = class_create(THIS_MODULE, "tape"); if (IS_ERR(tape_class)) { printk(VIOTAPE_KERN_WARN "Unable to allocat class\n"); ret = PTR_ERR(tape_class); goto unreg_chrdev; } ret = vio_register_driver(&viotape_driver); if (ret) goto unreg_class; proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL, &proc_viotape_operations); return 0; unreg_class: class_destroy(tape_class); unreg_chrdev: unregister_chrdev(VIOTAPE_MAJOR, "viotape"); clear_handler: vio_clearHandler(viomajorsubtype_tape); viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2); clear_op: clear_op_struct_pool(); return ret; } /* Give a new state to the tape object */ static int chg_state(int index, unsigned char new_state, struct file *file) { unsigned char *cur_state = &state[index].part_stat_rwi[state[index].cur_part]; int rc = 0; /* if the same state, don't bother */ if (*cur_state == new_state) return 0; /* write an EOF if changing from writing to some other state */ if (*cur_state == VIOT_WRITING) { struct mtop write_eof = { MTWEOF, 1 }; rc = viotap_ioctl(NULL, file, MTIOCTOP, (unsigned long)&write_eof); } *cur_state = new_state; return rc; } /* Cleanup */ static void __exit viotap_exit(void) { remove_proc_entry("iSeries/viotape", NULL); vio_unregister_driver(&viotape_driver); class_destroy(tape_class); unregister_chrdev(VIOTAPE_MAJOR, "viotape"); viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2); vio_clearHandler(viomajorsubtype_tape); clear_op_struct_pool(); } MODULE_LICENSE("GPL"); module_init(viotap_init); module_exit(viotap_exit);
gpl-2.0
pvittman/donkey-cm12
arch/ia64/sn/pci/tioca_provider.c
9240
18857
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/bitmap.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> #include <asm/sn/pcidev.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/tioca_provider.h> u32 tioca_gart_found; EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ LIST_HEAD(tioca_list); EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */ static int tioca_gart_init(struct tioca_kernel *); /** * tioca_gart_init - Initialize SGI TIOCA GART * @tioca_common: ptr to common prom/kernel struct identifying the * * If the indicated tioca has devices present, initialize its associated * GART MMR's and kernel memory. */ static int tioca_gart_init(struct tioca_kernel *tioca_kern) { u64 ap_reg; u64 offset; struct page *tmp; struct tioca_common *tioca_common; struct tioca __iomem *ca_base; tioca_common = tioca_kern->ca_common; ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; if (list_empty(tioca_kern->ca_devices)) return 0; ap_reg = 0; /* * Validate aperature size */ switch (CA_APERATURE_SIZE >> 20) { case 4: ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */ break; case 8: ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */ break; case 16: ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */ break; case 32: ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */ break; case 64: ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */ break; case 128: ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */ break; case 256: ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */ break; case 512: ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */ break; case 1024: ap_reg |= 
(0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */ break; case 2048: ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */ break; case 4096: ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */ break; default: printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE); return -1; } /* * Set up other aperature parameters */ if (PAGE_SIZE >= 16384) { tioca_kern->ca_ap_pagesize = 16384; ap_reg |= CA_GART_PAGE_SIZE; } else { tioca_kern->ca_ap_pagesize = 4096; } tioca_kern->ca_ap_size = CA_APERATURE_SIZE; tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE; tioca_kern->ca_gart_entries = tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize; ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI); ap_reg |= tioca_kern->ca_ap_bus_base; /* * Allocate and set up the GART */ tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64); tmp = alloc_pages_node(tioca_kern->ca_closest_node, GFP_KERNEL | __GFP_ZERO, get_order(tioca_kern->ca_gart_size)); if (!tmp) { printk(KERN_ERR "%s: Could not allocate " "%llu bytes (order %d) for GART\n", __func__, tioca_kern->ca_gart_size, get_order(tioca_kern->ca_gart_size)); return -ENOMEM; } tioca_kern->ca_gart = page_address(tmp); tioca_kern->ca_gart_coretalk_addr = PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart)); /* * Compute PCI/AGP convenience fields */ offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE; tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE; tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_base = tioca_kern->ca_gart_coretalk_addr + offset; tioca_kern->ca_pcigart = &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start]; tioca_kern->ca_pcigart_entries = tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_pagemap = kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); if (!tioca_kern->ca_pcigart_pagemap) { free_pages((unsigned long)tioca_kern->ca_gart, 
get_order(tioca_kern->ca_gart_size)); return -1; } offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE; tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE; tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_gfxgart_base = tioca_kern->ca_gart_coretalk_addr + offset; tioca_kern->ca_gfxgart = &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start]; tioca_kern->ca_gfxgart_entries = tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize; /* * various control settings: * use agp op-combining * use GET semantics to fetch memory * participate in coherency domain * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 */ __sn_setq_relaxed(&ca_base->ca_control1, CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */ __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); __sn_setq_relaxed(&ca_base->ca_control2, (0x2ull << CA_GART_MEM_PARAM_SHFT)); tioca_kern->ca_gart_iscoherent = 1; __sn_clrq_relaxed(&ca_base->ca_control2, (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB)); /* * Unmask GART fetch error interrupts. Clear residual errors first. */ writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias); writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias); __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR); /* * Program the aperature and gart registers in TIOCA */ writeq(ap_reg, &ca_base->ca_gart_aperature); writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table); return 0; } /** * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions * @tioca_kernel: structure representing the CA * * Given a CA, scan all attached functions making sure they all support * FastWrite. If so, enable FastWrite for all functions and the CA itself. 
*/ void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) { int cap_ptr; u32 reg; struct tioca __iomem *tioca_base; struct pci_dev *pdev; struct tioca_common *common; common = tioca_kern->ca_common; /* * Scan all vga controllers on this bus making sure they all * support FW. If not, return. */ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return; /* no AGP CAP means no FW */ pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg); if (!(reg & PCI_AGP_STATUS_FW)) return; /* function doesn't support FW */ } /* * Set fw for all vga fn's */ list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg); reg |= PCI_AGP_COMMAND_FW; pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg); } /* * Set ca's fw to match */ tioca_base = (struct tioca __iomem*)common->ca_common.bs_base; __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE); } EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ /** * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode * @paddr: system physical address * * Map @paddr into 64-bit CA bus space. No device context is necessary. * Bits 53:0 come from the coretalk address. 
We just need to mask in the * following optional bits of the 64-bit pci address: * * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent) * 0x2 for PIO (non-coherent) * We will always use 0x1 * 55:55 - Swap bytes Currently unused */ static u64 tioca_dma_d64(unsigned long paddr) { dma_addr_t bus_addr; bus_addr = PHYS_TO_TIODMA(paddr); BUG_ON(!bus_addr); BUG_ON(bus_addr >> 54); /* Set upper nibble to Cache Coherent Memory op */ bus_addr |= (1UL << 60); return bus_addr; } /** * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode * @pdev: linux pci_dev representing the function * @paddr: system physical address * * Map @paddr into 64-bit bus space of the CA associated with @pcidev_info. * * The CA agp 48 bit direct address falls out as follows: * * When direct mapping AGP addresses, the 48 bit AGP address is * constructed as follows: * * [47:40] - Low 8 bits of the page Node ID extracted from coretalk * address [47:40]. The upper 8 node bits are fixed * and come from the xxx register bits [5:0] * [39:38] - Chiplet ID extracted from coretalk address [39:38] * [37:00] - node offset extracted from coretalk address [37:00] * * Since the node id in general will be non-zero, and the chiplet id * will always be non-zero, it follows that the device must support * a dma mask of at least 0xffffffffff (40 bits) to target node 0 * and in general should be 0xffffffffffff (48 bits) to target nodes * up to 255. Nodes above 255 need the support of the xxx register, * and so a given CA can only directly target nodes in the range * xxx - xxx+255. 
*/ static u64 tioca_dma_d48(struct pci_dev *pdev, u64 paddr) { struct tioca_common *tioca_common; struct tioca __iomem *ca_base; u64 ct_addr; dma_addr_t bus_addr; u32 node_upper; u64 agp_dma_extn; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; ct_addr = PHYS_TO_TIODMA(paddr); if (!ct_addr) return 0; bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL); node_upper = ct_addr >> 48; if (node_upper > 64) { printk(KERN_ERR "%s: coretalk addr 0x%p node id out " "of range\n", __func__, (void *)ct_addr); return 0; } agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn); if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { printk(KERN_ERR "%s: coretalk upper node (%u) " "mismatch with ca_agp_dma_addr_extn (%llu)\n", __func__, node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); return 0; } return bus_addr; } /** * tioca_dma_mapped - create a DMA mapping using a CA GART * @pdev: linux pci_dev representing the function * @paddr: host physical address to map * @req_size: len (bytes) to map * * Map @paddr into CA address space using the GART mechanism. The mapped * dma_addr_t is guaranteed to be contiguous in CA bus space. 
*/ static dma_addr_t tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) { int ps, ps_shift, entry, entries, mapsize; u64 xio_addr, end_xio_addr; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; dma_addr_t bus_addr = 0; struct tioca_dmamap *ca_dmamap; void *map; unsigned long flags; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; xio_addr = PHYS_TO_TIODMA(paddr); if (!xio_addr) return 0; spin_lock_irqsave(&tioca_kern->ca_lock, flags); /* * allocate a map struct */ ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC); if (!ca_dmamap) goto map_return; /* * Locate free entries that can hold req_size. Account for * unaligned start/length when allocating. */ ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */ ps_shift = ffs(ps) - 1; end_xio_addr = xio_addr + req_size - 1; entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1; map = tioca_kern->ca_pcigart_pagemap; mapsize = tioca_kern->ca_pcigart_entries; entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); if (entry >= mapsize) { kfree(ca_dmamap); goto map_return; } bitmap_set(map, entry, entries); bus_addr = tioca_kern->ca_pciap_base + (entry * ps); ca_dmamap->cad_dma_addr = bus_addr; ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_entry = entry; list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); if (xio_addr % ps) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); bus_addr += xio_addr & (ps - 1); xio_addr &= ~(ps - 1); xio_addr += ps; entry++; } while (xio_addr < end_xio_addr) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); xio_addr += ps; entry++; } tioca_tlbflush(tioca_kern); map_return: spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); return bus_addr; } /** * tioca_dma_unmap - release CA mapping resources * @pdev: linux pci_dev representing the 
function * @bus_addr: bus address returned by an earlier tioca_dma_map * @dir: mapping direction (unused) * * Locate mapping resources associated with @bus_addr and release them. * For mappings created using the direct modes (64 or 48) there are no * resources to release. */ static void tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) { int i, entry; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct tioca_dmamap *map; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); unsigned long flags; tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; /* return straight away if this isn't be a mapped address */ if (bus_addr < tioca_kern->ca_pciap_base || bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size)) return; spin_lock_irqsave(&tioca_kern->ca_lock, flags); list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list) if (map->cad_dma_addr == bus_addr) break; BUG_ON(map == NULL); entry = map->cad_gart_entry; for (i = 0; i < map->cad_gart_size; i++, entry++) { clear_bit(entry, tioca_kern->ca_pcigart_pagemap); tioca_kern->ca_pcigart[entry] = 0; } tioca_tlbflush(tioca_kern); list_del(&map->cad_list); spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); kfree(map); } /** * tioca_dma_map - map pages for PCI DMA * @pdev: linux pci_dev representing the function * @paddr: host physical address to map * @byte_count: bytes to map * * This is the main wrapper for mapping host physical pages to CA PCI space. * The mapping mode used is based on the devices dma_mask. As a last resort * use the GART mapped mode. */ static u64 tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { u64 mapaddr; /* * Not supported for now ... */ if (dma_flags & SN_DMA_MSI) return 0; /* * If card is 64 or 48 bit addressable, use a direct mapping. 32 * bit direct is so restrictive w.r.t. 
where the memory resides that * we don't use it even though CA has some support. */ if (pdev->dma_mask == ~0UL) mapaddr = tioca_dma_d64(paddr); else if (pdev->dma_mask == 0xffffffffffffUL) mapaddr = tioca_dma_d48(pdev, paddr); else mapaddr = 0; /* Last resort ... use PCI portion of CA GART */ if (mapaddr == 0) mapaddr = tioca_dma_mapped(pdev, paddr, byte_count); return mapaddr; } /** * tioca_error_intr_handler - SGI TIO CA error interrupt handler * @irq: unused * @arg: pointer to tioca_common struct for the given CA * * Handle a CA error interrupt. Simply a wrapper around a SAL call which * defers processing to the SGI prom. */ static irqreturn_t tioca_error_intr_handler(int irq, void *arg) { struct tioca_common *soft = arg; struct ia64_sal_retval ret_stuff; u64 segment; u64 busnum; ret_stuff.status = 0; ret_stuff.v0 = 0; segment = soft->ca_common.bs_persist_segment; busnum = soft->ca_common.bs_persist_busnum; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, segment, busnum, 0, 0, 0, 0, 0); return IRQ_HANDLED; } /** * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus * @prom_bussoft: Common prom/kernel struct representing the bus * * Replicates the tioca_common pointed to by @prom_bussoft in kernel * space. Allocates and initializes a kernel-only area for a given CA, * and sets up an irq for handling CA error interrupts. * * On successful setup, returns the kernel version of tioca_common back to * the caller. */ static void * tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) { struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct pci_bus *bus; /* sanity check prom rev */ if (is_shub1() && sn_sal_rev() < 0x0406) { printk (KERN_ERR "%s: SGI prom rev 4.06 or greater required " "for tioca support\n", __func__); return NULL; } /* * Allocate kernel bus soft and copy from prom. 
*/ tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common), GFP_KERNEL); if (!tioca_common) return NULL; tioca_common->ca_common.bs_base = (unsigned long) ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base), sizeof(struct tioca_common)); /* init kernel-private area */ tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL); if (!tioca_kern) { kfree(tioca_common); return NULL; } tioca_kern->ca_common = tioca_common; spin_lock_init(&tioca_kern->ca_lock); INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); tioca_kern->ca_closest_node = nasid_to_cnodeid(tioca_common->ca_closest_nasid); tioca_common->ca_kernel_private = (u64) tioca_kern; bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, tioca_common->ca_common.bs_persist_busnum); BUG_ON(!bus); tioca_kern->ca_devices = &bus->devices; /* init GART */ if (tioca_gart_init(tioca_kern) < 0) { kfree(tioca_kern); kfree(tioca_common); return NULL; } tioca_gart_found++; list_add(&tioca_kern->ca_list, &tioca_list); if (request_irq(SGI_TIOCA_ERROR, tioca_error_intr_handler, IRQF_SHARED, "TIOCA error", (void *)tioca_common)) printk(KERN_WARNING "%s: Unable to get irq %d. " "Error interrupts won't be routed for TIOCA bus %d\n", __func__, SGI_TIOCA_ERROR, (int)tioca_common->ca_common.bs_persist_busnum); irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq); sn_set_err_irq_affinity(SGI_TIOCA_ERROR); /* Setup locality information */ controller->node = tioca_kern->ca_closest_node; return tioca_common; } static struct sn_pcibus_provider tioca_pci_interfaces = { .dma_map = tioca_dma_map, .dma_map_consistent = tioca_dma_map, .dma_unmap = tioca_dma_unmap, .bus_fixup = tioca_bus_fixup, .force_interrupt = NULL, .target_interrupt = NULL }; /** * tioca_init_provider - init SN PCI provider ops for TIO CA */ int tioca_init_provider(void) { sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; return 0; }
gpl-2.0
donkeykang/donkeyk
fs/hpfs/dentry.c
10520
1540
/*
 *  linux/fs/hpfs/dentry.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  dcache operations
 */

#include "hpfs_fn.h"

/*
 * Note: the dentry argument is the parent dentry.
 */

/*
 * Hash a name for the dcache.  HPFS names compare case-insensitively
 * through the volume's code page table, so the hash is computed over the
 * upcased bytes; otherwise d_hash and d_compare would disagree.
 */
static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
		struct qstr *qstr)
{
	unsigned long	 hash;
	int	 i;
	unsigned l = qstr->len;

	/* "." and ".." bypass the HPFS length adjustment below. */
	if (l == 1) if (qstr->name[0]=='.') goto x;
	/*
	 * NOTE(review): the '||' also matches any 2-byte name whose first
	 * OR second byte is '.', not only "..".  This mirrors the
	 * historical upstream code; confirm intent before changing to '&&'.
	 */
	if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
	/* Drop trailing dots/spaces the way HPFS stores names on disk. */
	hpfs_adjust_length(qstr->name, &l);
	/*if (hpfs_chk_name(qstr->name,&l))*/
		/*return -ENAMETOOLONG;*/
		/*return -ENOENT;*/
	x:

	hash = init_name_hash();
	for (i = 0; i < l; i++)
		hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash);
	qstr->hash = end_name_hash(hash);

	return 0;
}

/*
 * Case-insensitive name comparison for the dcache.
 * Returns 0 on match, 1 on mismatch (dcache convention).
 */
static int hpfs_compare_dentry(const struct dentry *parent,
		const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	unsigned al = len;
	unsigned bl = name->len;

	hpfs_adjust_length(str, &al);
	/*hpfs_adjust_length(b->name, &bl);*/

	/*
	 * 'str' is the name of an already existing dentry, so the name
	 * must be valid. 'name' must be validated first.
	 */

	if (hpfs_chk_name(name->name, &bl))
		return 1;
	if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0))
		return 1;
	return 0;
}

const struct dentry_operations hpfs_dentry_operations = {
	.d_hash		= hpfs_hash_dentry,
	.d_compare	= hpfs_compare_dentry,
};
gpl-2.0
cannondalev2000/kernel_lge_msm8974
arch/powerpc/boot/redboot-8xx.c
14104
1454
/*
 * RedBoot firmware support
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "redboot.h"
#include "fsl-soc.h"
#include "io.h"

static bd_t bd;
BSS_STACK(4096);

#define MHZ(x)	((x + 500000) / 1000000)

/* Patch the device tree with the board data handed over by RedBoot. */
static void platform_fixups(void)
{
	void *brg_node;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 16, bd.bi_busfreq);

	/* The CPM baud-rate generator, if present, runs off the bus clock. */
	brg_node = finddevice("/soc/cpm/brg");
	if (!brg_node)
		return;

	printf("BRG clock-frequency <- 0x%x (%dMHz)\r\n",
	       bd.bi_busfreq, MHZ(bd.bi_busfreq));
	setprop(brg_node, "clock-frequency", &bd.bi_busfreq, 4);
}

/*
 * Boot entry point: r3 carries the address of the RedBoot board-info
 * block.  Bail out unless it carries the expected magic tag
 * (0x42444944 == ASCII "BDID").
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                   unsigned long r6, unsigned long r7)
{
	memcpy(&bd, (char *)r3, sizeof(bd));

	if (bd.bi_tag != 0x42444944)
		return;

	simple_alloc_init(_end,
			  bd.bi_memstart + bd.bi_memsize - (unsigned long)_end,
			  32, 64);
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
XileForce/Vindicator-S6
arch/powerpc/boot/ep8248e.c
14104
1260
/*
 * Embedded Planet EP8248E with PlanetCore firmware
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "pq2.h"

static char *table;
static u64 mem_size;

#include <io.h>

/* Fix up memory size, MAC addresses and clocks from the PlanetCore table. */
static void platform_fixups(void)
{
	u64 crystal_hz;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ,
				    &crystal_hz)) {
		printf("No PlanetCore crystal frequency key.\r\n");
		return;
	}

	pq2_fixup_clocks(crystal_hz);
}

/*
 * Boot entry point: r3 carries the PlanetCore key table.  Gives up
 * silently if the RAM-size key is missing.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                   unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size *= 1024 * 1024;	/* key value is in megabytes */

	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
	fdt_init(_dtb_start);

	planetcore_set_stdout_path(table);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
sakuramilk/linux-2.6.32.y
arch/powerpc/boot/cuboot-824x.c
14104
1224
/*
 * Old U-boot compatibility for 824x
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#define TARGET_824x
#include "ppcboot.h"

static bd_t bd;

/* Patch the device tree with the board data handed over by U-Boot. */
static void platform_fixups(void)
{
	void *soc_node;
	void *uart;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);

	soc_node = find_node_by_devtype(NULL, "soc");
	if (!soc_node)
		return;

	setprop(soc_node, "bus-frequency", &bd.bi_busfreq,
		sizeof(bd.bi_busfreq));

	/* Propagate the bus clock to every serial node under the SoC. */
	uart = NULL;
	while ((uart = find_node_by_devtype(uart, "serial")) != NULL) {
		if (get_parent(uart) != soc_node)
			continue;
		setprop(uart, "clock-frequency", &bd.bi_busfreq,
			sizeof(bd.bi_busfreq));
	}
}

/* Boot entry point: read the old-U-Boot bd_t, then do standard setup. */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
vlw/android_kernel_samsung_msm8916_A3
arch/powerpc/boot/ep8248e.c
14104
1260
/*
 * Embedded Planet EP8248E with PlanetCore firmware
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "pq2.h"

static char *table;
static u64 mem_size;

#include <io.h>

/* Fix up memory size, MAC addresses and clocks from the PlanetCore table. */
static void platform_fixups(void)
{
	u64 crystal_hz;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ,
				    &crystal_hz)) {
		printf("No PlanetCore crystal frequency key.\r\n");
		return;
	}

	pq2_fixup_clocks(crystal_hz);
}

/*
 * Boot entry point: r3 carries the PlanetCore key table.  Gives up
 * silently if the RAM-size key is missing.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                   unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size *= 1024 * 1024;	/* key value is in megabytes */

	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
	fdt_init(_dtb_start);

	planetcore_set_stdout_path(table);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
Renzo-Olivares/android_43_kernel_htc_monarudo
drivers/gpu/msm/kgsl_debugfs.c
25
8941
/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * debugfs support for the KGSL GPU driver: per-device log-level knobs,
 * postmortem-dump controls, a freed-memory history dump, and per-process
 * GPU memory listings.
 */

#include <linux/module.h>
#include <linux/debugfs.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

/* Default verbosity for the per-device log knobs; MAX clamps user input. */
#define KGSL_LOG_LEVEL_DEFAULT 3
#define KGSL_LOG_LEVEL_MAX     7

struct dentry *kgsl_debugfs_dir;
static struct dentry *pm_d_debugfs;
struct dentry *proc_d_debugfs;

/* Write any non-zero value to <debugfs>/kgsl/<dev>/postmortem/dump to
 * trigger a postmortem dump under the device mutex. */
static int pm_dump_set(void *data, u64 val)
{
	struct kgsl_device *device = data;

	if (val) {
		mutex_lock(&device->mutex);
		kgsl_postmortem_dump(device, 1);
		mutex_unlock(&device->mutex);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops, NULL, pm_dump_set, "%llu\n");

/* Boolean toggle: include register dumps in postmortem output. */
static int pm_regs_enabled_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	device->pm_regs_enabled = val ? 1 : 0;
	return 0;
}

static int pm_regs_enabled_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	*val = device->pm_regs_enabled;
	return 0;
}

/* Boolean toggle: include indirect buffers (IBs) in postmortem output. */
static int pm_ib_enabled_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	device->pm_ib_enabled = val ? 1 : 0;
	return 0;
}

static int pm_ib_enabled_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	*val = device->pm_ib_enabled;
	return 0;
}

/* Master enable for postmortem dumping (not clamped to 0/1, unlike the
 * two toggles above — any value the user writes is stored as-is). */
static int pm_enabled_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	device->pm_dump_enable = val;
	return 0;
}

static int pm_enabled_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	*val = device->pm_dump_enable;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops, pm_regs_enabled_get,
			pm_regs_enabled_set, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(pm_ib_enabled_fops, pm_ib_enabled_get,
			pm_ib_enabled_set, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(pm_enabled_fops, pm_enabled_get,
			pm_enabled_set, "%llu\n");

/* Store a log level, clamped to KGSL_LOG_LEVEL_MAX. */
static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
{
	*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
	return 0;
}

/* Generate get/set/fops for one per-device log-level field. */
#define KGSL_DEBUGFS_LOG(__log)                         \
static int __log ## _set(void *data, u64 val)           \
{                                                       \
	struct kgsl_device *device = data;              \
	return kgsl_log_set(&device->__log, data, val); \
}                                                       \
static int __log ## _get(void *data, u64 *val)          \
{                                                       \
	struct kgsl_device *device = data;              \
	*val = device->__log;                           \
	return 0;                                       \
}                                                       \
DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops,                 \
__log ## _get, __log ## _set, "%llu\n");                \

KGSL_DEBUGFS_LOG(drv_log);
KGSL_DEBUGFS_LOG(cmd_log);
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
KGSL_DEBUGFS_LOG(ft_log);

/* seq_file show callback: walk the freed-memory history ring buffer,
 * starting at the write pointer (oldest entry) and wrapping at the end,
 * printing every slot with a non-zero size. */
static int memfree_hist_print(struct seq_file *s, void *unused)
{
	void *base = kgsl_driver.memfree_hist.base_hist_rb;

	struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
	struct kgsl_memfree_hist_elem *p;
	char str[16];

	seq_printf(s, "%8s %8s %8s %11s\n",
			"pid", "gpuaddr", "size", "flags");

	mutex_lock(&kgsl_driver.memfree_hist_mutex);
	p = wptr;
	for (;;) {
		kgsl_get_memory_usage(str, sizeof(str), p->flags);
		/* size == 0 marks a slot that was never filled */
		if (p->size)
			seq_printf(s, "%8d %08x %8d %11s\n",
				p->pid, p->gpuaddr, p->size, str);
		p++;
		/* wrap to the start of the ring buffer */
		if ((void *)p >= base + kgsl_driver.memfree_hist.size)
			p = (struct kgsl_memfree_hist_elem *) base;

		/* back at the write pointer: one full lap done */
		if (p == kgsl_driver.memfree_hist.wptr)
			break;
	}
	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
	return 0;
}

static int memfree_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, memfree_hist_print, inode->i_private);
}

static const struct file_operations memfree_hist_fops = {
	.open = memfree_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create the per-device debugfs tree: log-level files plus the
 * "postmortem" subdirectory of dump controls.  Silently bails if the
 * core kgsl dir or the device dir could not be created. */
void kgsl_device_debugfs_init(struct kgsl_device *device)
{
	if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
		device->d_debugfs = debugfs_create_dir(device->name,
						       kgsl_debugfs_dir);

	if (!device->d_debugfs || IS_ERR(device->d_debugfs))
		return;

	device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
	device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
	device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
	device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
	device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
	device->ft_log = KGSL_LOG_LEVEL_DEFAULT;

	/* NOTE(review): ft_log gets a default above but no "log_level_ft"
	 * file is created below — confirm whether that is intentional. */
	debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
			    &cmd_log_fops);
	debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
			    &ctxt_log_fops);
	debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
			    &drv_log_fops);
	debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
				&mem_log_fops);
	debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
				&pwr_log_fops);
	debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
				&memfree_hist_fops);

	/* Create postmortem dump control files */

	pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);

	if (IS_ERR(pm_d_debugfs))
		return;

	debugfs_create_file("dump",  0600, pm_d_debugfs, device,
			    &pm_dump_fops);
	debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
			    &pm_regs_enabled_fops);
	debugfs_create_file("ib_enabled", 0644, pm_d_debugfs, device,
				&pm_ib_enabled_fops);
	device->pm_dump_enable = 0;
	debugfs_create_file("enable", 0644, pm_d_debugfs, device,
				&pm_enabled_fops);
}

/* Indexed by entry->memtype; out-of-range values map to "unknown". */
static const char * const memtype_strings[] = {
	"gpumem", "pmem", "ashmem", "usermap", "ion",
};

static const char *memtype_str(int memtype)
{
	if (memtype < ARRAY_SIZE(memtype_strings))
		return memtype_strings[memtype];
	return "unknown";
}

/* One-character alignment code for the listing: 'L' = >= 1M aligned,
 * 'l' = >= 64K aligned, '-' otherwise. */
static char get_alignflag(const struct kgsl_memdesc *m)
{
	int align = kgsl_memdesc_get_align(m);
	if (align >= ilog2(SZ_1M))
		return 'L';
	else if (align >= ilog2(SZ_64K))
		return 'l';
	return '-';
}

/* One-character cache-mode code for the listing. */
static char get_cacheflag(const struct kgsl_memdesc *m)
{
	static const char table[] = {
		[KGSL_CACHEMODE_WRITECOMBINE] = '-',
		[KGSL_CACHEMODE_UNCACHED] = 'u',
		[KGSL_CACHEMODE_WRITEBACK] = 'b',
		[KGSL_CACHEMODE_WRITETHROUGH] = 't',
	};
	return table[kgsl_memdesc_get_cachemode(m)];
}

/* Print one row of the per-process memory listing: address, size, id,
 * flag string (global/readonly/align/cache/cpumap), type, usage, sglen. */
static void print_mem_entry(struct seq_file *s, struct kgsl_mem_entry *entry)
{
	char flags[6];
	char usage[16];
	struct kgsl_memdesc *m = &entry->memdesc;

	flags[0] = kgsl_memdesc_is_global(m) ?  'g' : '-';
	flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
	flags[2] = get_alignflag(m);
	flags[3] = get_cacheflag(m);
	flags[4] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
	flags[5] = '\0';

	kgsl_get_memory_usage(usage, sizeof(usage), m->flags);

	seq_printf(s, "%08x %8d %5d %5s %10s %16s %5d\n",
			m->gpuaddr, m->size, entry->id, flags,
			memtype_str(entry->memtype), usage, m->sglen);
}

/* seq_file show callback for <debugfs>/kgsl/proc/<pid>/mem: dump the
 * rb-tree of mapped entries, then the idr entries that have no GPU
 * address yet (gpuaddr == 0) so nothing is listed twice. */
static int process_mem_print(struct seq_file *s, void *unused)
{
	struct kgsl_mem_entry *entry;
	struct rb_node *node;
	struct kgsl_process_private *private = s->private;
	int next = 0;

	seq_printf(s, "%8s %8s %5s %5s %10s %16s %5s\n",
		   "gpuaddr", "size", "id", "flags", "type", "usage", "sglen");

	/* print all entries with a GPU address */
	spin_lock(&private->mem_lock);

	for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		print_mem_entry(s, entry);
	}

	spin_unlock(&private->mem_lock);

	/* now print all the unbound entries */
	while (1) {
		rcu_read_lock();
		entry = idr_get_next(&private->mem_idr, &next);
		rcu_read_unlock();

		if (entry == NULL)
			break;
		if (entry->memdesc.gpuaddr == 0)
			print_mem_entry(s, entry);
		next++;
	}

	return 0;
}

static int process_mem_open(struct inode *inode, struct file *file)
{
	return single_open(file, process_mem_print, inode->i_private);
}

static const struct file_operations process_mem_fops = {
	.open = process_mem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create <debugfs>/kgsl/proc/<pid>/ with a read-only "mem" listing. */
void
kgsl_process_init_debugfs(struct kgsl_process_private *private)
{
	unsigned char name[16];

	snprintf(name, sizeof(name), "%d", private->pid);

	private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
	debugfs_create_file("mem", 0400, private->debug_root, private,
			    &process_mem_fops);
}

/* Create the top-level "kgsl" debugfs directory and its "proc" subdir. */
void kgsl_core_debugfs_init(void)
{
	kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);

	proc_d_debugfs = debugfs_create_dir("proc", kgsl_debugfs_dir);
}

void kgsl_core_debugfs_close(void)
{
	debugfs_remove_recursive(kgsl_debugfs_dir);
}
gpl-2.0
lyfkevin/Wind_iproj_ICS_kernel
lge/com_device/felica/felica_rws.c
25
7466
/*
 *  felica_rws.c
 *
 *  FeliCa RWS ("reader/writer status") misc char device.  Exposes the
 *  FeliCa INT GPIO to userspace via open/read/release, and launches the
 *  FeliCa MFC Android application from a falling-edge interrupt on that
 *  GPIO via a delayed workqueue + call_usermodehelper.
 *
 *  Fix vs. previous revision: three debug/log strings in the read path
 *  carried the wrong subsystem tag ("[FELICA_RFS]" / "felica_rfs_read"),
 *  copy-pasted from the RFS driver; they now say RWS so log greps match
 *  the right driver.  No functional change.
 */

/*
 *    INCLUDE FILES FOR MODULE
 */
#include "felica_rws.h"
#include "felica_gpio.h"
#include "felica_test.h"

/*
 *  DEFINE
 */

/* Values reported to userspace by read(): GPIO high -> AVAILABLE(0),
 * GPIO low -> NOT_AVAILABLE(1). */
enum{
  RWS_AVAILABLE = 0,
  RWS_NOT_AVAILABLE,
};

/* Debug message feature */
/* #define FEATURE_DEBUG_LOW */

/* Debug intent */
/* #define FELICA_INTENT "my.andr.u5/.FeliCaTest" */
#define FELICA_INTENT "com.felicanetworks.mfc/com.felicanetworks.adhoc.AdhocReceiver"

/*
 *   INTERNAL DEFINITION
 */

static int isopen = 0; // 0 : No open 1 : Open

/*
 *   FUNCTION PROTOTYPE
 */
static void felica_int_low_work(struct work_struct *data);
static DECLARE_DELAYED_WORK(felica_int_work, felica_int_low_work);

/*
 *   FUNCTION DEFINITION
 */

/*
 * Description : Start the FeliCa Android activity via the "am" command
 *               through call_usermodehelper.
 * Output      : 0 on success, non-zero usermodehelper error otherwise.
 */
static int invoke_felica_apk(void)
{
  char *argv[] = { "/system/bin/sh","/system/bin/am", "start", "-n", FELICA_INTENT, "--activity-clear-top", NULL };
  static char *envp[] = {FELICA_LD_LIBRARY_PATH,FELICA_BOOTCLASSPATH,FELICA_PATH,NULL };
  int rc = 0;

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] invoke felica app... \n");
#endif

  /* UMH_WAIT_EXEC: wait only for exec, not for the app to finish */
  rc = call_usermodehelper( argv[0], argv, envp, UMH_WAIT_EXEC );

  FELICA_DEBUG_MSG("[FELICA_RWS] felica app result : %d \n", rc);

  return rc;
}

/*
 * Description : Deferred work scheduled from the INT-low ISR.  Takes the
 *               FeliCa wake lock, masks the INT IRQ while the app is being
 *               spawned, then re-enables it.
 * Note        : On success the wake lock is intentionally left held here;
 *               it is only released on spawn failure.  Presumably the
 *               launched app (or another driver path) releases it —
 *               TODO(review): confirm, otherwise this leaks a wake lock.
 */
static void felica_int_low_work(struct work_struct *data)
{
  int rc = 0;

  lock_felica_wake_lock();
  disable_irq_nosync(gpio_to_irq(GPIO_FELICA_INT));
  usermodehelper_enable();

//#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_int_low_work - start \n");
//#endif

  rc = invoke_felica_apk();

  if(rc)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] Error - invoke app \n");
    unlock_felica_wake_lock();
  }

//#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_int_low_work - end \n");
//#endif

  enable_irq(gpio_to_irq(GPIO_FELICA_INT));
}

/*
 * Description : Falling-edge ISR for the FeliCa INT GPIO; defers all real
 *               work (which may sleep) to the workqueue.
 */
static irqreturn_t felica_int_low_isr(int irq, void *dev_id)
{
#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_int_low_isr - start \n");
#endif

  schedule_delayed_work(&felica_int_work,0);

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_int_low_isr - end \n");
#endif

  return IRQ_HANDLED;
}

/*
 * Description : Device open.  Single-open policy (isopen flag); configures
 *               the INT GPIO as an input.
 * Output      : Success : 0   Fail : -1 (already open) or GPIO error code.
 */
static int felica_rws_open (struct inode *inode, struct file *fp)
{
  int rc = 0;

  if(1 == isopen)
  {
#ifdef FEATURE_DEBUG_LOW
    FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_open - already open \n");
#endif
    /* NOTE(review): -1 is -EPERM to userspace; -EBUSY would be the
     * conventional code here. */
    return -1;
  }
  else
  {
#ifdef FEATURE_DEBUG_LOW
    FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_open - start \n");
#endif
    isopen = 1;
  }

  rc = felica_gpio_open(GPIO_FELICA_INT, GPIO_DIRECTION_IN, GPIO_HIGH_VALUE);

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_open - end \n");
#endif

#ifdef FELICA_FN_DEVICE_TEST
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_open - result(%d) \n",result_open_rws);
  return result_open_rws;
#else
  return rc;
#endif
}

/*
 * Description : Device read.  Samples the INT GPIO and returns one int to
 *               userspace: RWS_AVAILABLE (GPIO high) or RWS_NOT_AVAILABLE
 *               (GPIO low).  count must be exactly 1.
 * Output      : count on success, -1 / copy_to_user result on failure.
 * Note        : copies `count` (1) byte of a 4-byte int — works only on
 *               little-endian targets; flagged for review.
 */
static ssize_t felica_rws_read(struct file *fp, char *buf, size_t count, loff_t *pos)
{
  int rc = 0;
  int getvalue = GPIO_HIGH_VALUE; /* Default status*/

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_read - start \n");
#endif

  /* Check error */
  if(NULL == fp)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR fp \n");
    return -1;
  }

  if(NULL == buf)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR buf \n");
    return -1;
  }

  if(1 != count)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR count \n");
    return -1;
  }

  if(NULL == pos)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR file \n");
    return -1;
  }

  /* Get GPIO value */
  getvalue = felica_gpio_read(GPIO_FELICA_INT);

  if((GPIO_LOW_VALUE != getvalue)&&(GPIO_HIGH_VALUE != getvalue))
  {
    /* was "[FELICA_RFS]" — wrong subsystem tag, fixed */
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR - getvalue is out of range \n");
    return -1;
  }

  /* Change GPIO value to RWS value */
  getvalue = getvalue ? RWS_AVAILABLE : RWS_NOT_AVAILABLE;

  FELICA_DEBUG_MSG("[FELICA_RWS] RWS status : %d \n", getvalue);

  /* Copy value to user memory */
  rc = copy_to_user((void *)buf, (void *)&getvalue, count);

  if(rc)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] ERROR - copy_to_user \n");
    return rc;
  }

#ifdef FEATURE_DEBUG_LOW
  /* was "felica_rfs_read" — wrong function name in trace, fixed */
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_read - end \n");
#endif

#ifdef FELICA_FN_DEVICE_TEST
  /* was "felica_rfs_read" — wrong function name in trace, fixed */
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_read - result(%d) \n",result_read_rws);
  if(result_read_rws != -1)
    result_read_rws = count;
  return result_read_rws;
#else
  return count;
#endif
}

/*
 * Description : Device release; clears the single-open flag.
 * Output      : Success : 0   Fail : -1 (was not open).
 */
static int felica_rws_release (struct inode *inode, struct file *fp)
{
#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_release - start \n");
#endif

  if(0 == isopen)
  {
#ifdef FEATURE_DEBUG_LOW
    FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_release - not open \n");
#endif
    return -1;
  }
  else
  {
    isopen = 0;
#ifdef FEATURE_DEBUG_LOW
    FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_release - end \n");
#endif
  }

#ifdef FELICA_FN_DEVICE_TEST
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_release - result(%d) \n",result_close_rws);
  return result_close_rws;
#else
  return 0;
#endif
}

/*
 *  STRUCT DEFINITION
 */
static struct file_operations felica_rws_fops =
{
  .owner    = THIS_MODULE,
  .open     = felica_rws_open,
  .read     = felica_rws_read,
  .release  = felica_rws_release,
};

static struct miscdevice felica_rws_device =
{
  MISC_DYNAMIC_MINOR,
  FELICA_RWS_NAME,
  &felica_rws_fops
};

/*
 * Description : Module init.  Registers the misc device, requests the
 *               falling-edge INT IRQ (kept alive across suspend via
 *               IRQF_NO_SUSPEND) and marks it as a wakeup source.
 * Output      : 0 on success, negative errno otherwise.
 * Note        : on request_irq failure the misc device is not
 *               deregistered before returning — flagged for review.
 */
static int felica_rws_init(void)
{
  int rc = 0;

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_init - start \n");
#endif

  /* register the device file */
  rc = misc_register(&felica_rws_device);

  if (rc)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] FAIL!! can not register felica_int \n");
    return rc;
  }

  rc= request_irq(gpio_to_irq(GPIO_FELICA_INT), felica_int_low_isr, IRQF_TRIGGER_FALLING|IRQF_NO_SUSPEND, FELICA_RWS_NAME, NULL);

  if (rc)
  {
    FELICA_DEBUG_MSG("[FELICA_RWS] FAIL!! can not request_irq \n");
    return rc;
  }

  irq_set_irq_wake(gpio_to_irq(GPIO_FELICA_INT),1);

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_RWS] felica_rws_init - end \n");
#endif

  return 0;
}

/*
 * Description : Module exit; frees the IRQ and deregisters the device.
 */
static void felica_rws_exit(void)
{
#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_INT] felica_rws_exit - start \n");
#endif

  free_irq(gpio_to_irq(GPIO_FELICA_INT), NULL);
  misc_deregister(&felica_rws_device);

#ifdef FEATURE_DEBUG_LOW
  FELICA_DEBUG_MSG("[FELICA_INT] felica_rws_exit - end \n");
#endif
}

module_init(felica_rws_init);
module_exit(felica_rws_exit);

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
SimonKagstrom/mci500h-linux-2.4.27
net/ipx/af_spx.c
25
24627
/*
 *	This module implements the (SPP-derived) Sequenced Packet eXchange
 *	(SPX) protocol for Linux 2.1.X as specified in
 *		NetWare SPX Services Specification, Semantics and API
 *		 Revision:       1.00
 *		 Revision Date:  February 9, 1993
 *
 *	Developers:
 *      Jay Schulist    <jschlst@samba.org>
 *	Jim Freeman     <jfree@caldera.com>
 *
 *	Changes:
 *	Alan Cox	:	Fixed an skb_unshare check for NULL
 *				that crashed it under load. Renamed and
 *				made static the ipx ops. Removed the hack
 *				ipx methods interface. Dropped AF_SPX - its
 *				the wrong abstraction.
 *	Eduardo Trapani	:	Added a check for the return value of
 *				ipx_if_offset that crashed sock_alloc_send_skb.
 *				Added spx_datagram_poll() so that select()
 *				works now on SPX sockets.  Added updating
 *				of the alloc count to follow rmt_seq.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	None of the authors or maintainers or their employers admit
 *	liability nor provide warranty for any of this software.
 *	This material is provided "as is" and at no charge.
 *
 *	NOTE(review): this is 2.4-era code; mutual exclusion is done with
 *	cli()/sti()/save_flags(), which only disables local interrupts and
 *	is not SMP-safe.
 */

#include <linux/module.h>
#include <net/ipx.h>
#include <net/spx.h>
#include <net/sock.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <linux/poll.h>

static struct proto_ops *ipx_operations;	/* lower IPX layer ops, set at init */
static struct proto_ops spx_ops;
static __u16  connids;				/* rolling source connection id */

/* Functions needed for SPX connection start up */
static int spx_transmit(struct sock *sk,struct sk_buff *skb,int type,int len);
static void spx_retransmit(unsigned long data);
static void spx_watchdog(unsigned long data);
void spx_rcv(struct sock *sk, int bytes);

extern void ipx_remove_socket(struct sock *sk);

/* Datagram poll:	the same code as datagram_poll() in net/core
			but the right spx buffers are looked at and
			there is no question on the type of the socket
 */
static unsigned int spx_datagram_poll(struct file * file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	unsigned int mask;

	poll_wait(file, sk->sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->err || !skb_queue_empty(&sk->error_queue))
		mask |= POLLERR;
	if (sk->shutdown & RCV_SHUTDOWN)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&pdata->rcv_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Need to check for termination and startup */
	if (sk->state==TCP_CLOSE)
		mask |= POLLHUP;
	/* connection hasn't started yet? */
	if (sk->state == TCP_SYN_SENT)
		return mask;

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE,&sk->socket->flags);

	return mask;
}

/* Create the SPX specific data: initialise sequence counters, both
 * timers (not yet armed) and the three private skb queues. */
static int spx_sock_init(struct sock *sk)
{
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;

	pdata->state		= SPX_CLOSED;
	pdata->sequence 	= 0;
	pdata->acknowledge	= 0;
	pdata->source_connid	= htons(connids);
	pdata->rmt_seq		= 0;
	connids++;

	pdata->owner		= (void *)sk;
	pdata->sndbuf		= sk->sndbuf;

	pdata->watchdog.function = spx_watchdog;
	pdata->watchdog.data	= (unsigned long)sk;
	pdata->wd_interval	= VERIFY_TIMEOUT;
	pdata->retransmit.function = spx_retransmit;
	pdata->retransmit.data	= (unsigned long)sk;
	pdata->retransmits	= 0;
	pdata->retries		= 0;
	pdata->max_retries	= RETRY_COUNT;

	skb_queue_head_init(&pdata->rcv_queue);
	skb_queue_head_init(&pdata->transmit_queue);
	skb_queue_head_init(&pdata->retransmit_queue);

	return (0);
}

/* Socket create callback: only SOCK_SEQPACKET is supported. */
static int spx_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	/*
	 * Called on connection receive so cannot be GFP_KERNEL
	 */
	sk = sk_alloc(PF_IPX, GFP_ATOMIC, 1);
	if(sk == NULL)
		return (-ENOMEM);

	switch(sock->type)
	{
		case SOCK_SEQPACKET:
			sock->ops = &spx_ops;
			break;
		default:
			sk_free(sk);
			return (-ESOCKTNOSUPPORT);
	}

	sock_init_data(sock, sk);
	spx_sock_init(sk);
	sk->data_ready	= spx_rcv;	/* receive path hooks in via data_ready */
	sk->destruct	= NULL;
	sk->no_check	= 1;

	MOD_INC_USE_COUNT;

	return (0);
}

/* Mark the connection closed and stop both timers. */
void spx_close_socket(struct sock *sk)
{
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;

	pdata->state	= SPX_CLOSED;
	sk->state	= TCP_CLOSE;
	del_timer(&pdata->retransmit);
	del_timer(&pdata->watchdog);
}

/* Drain all four skb queues and free the sock. */
void spx_destroy_socket(struct sock *sk)
{
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	struct sk_buff *skb;

	ipx_remove_socket(sk);
	while((skb = skb_dequeue(&sk->receive_queue)) != NULL)
		kfree_skb(skb);
	while((skb = skb_dequeue(&pdata->transmit_queue)) != NULL)
		kfree_skb(skb);
	while((skb = skb_dequeue(&pdata->retransmit_queue)) != NULL)
		kfree_skb(skb);
	while((skb = skb_dequeue(&pdata->rcv_queue)) != NULL)
		kfree_skb(skb);

	sk_free(sk);
	MOD_DEC_USE_COUNT;
}

/* Release an SPX socket: send an informed disconnect if still connected,
 * then tear everything down.
 * NOTE(review): pdata is derived from sk *before* the sk == NULL check,
 * so the check cannot actually protect anything. */
static int spx_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;

	if(sk == NULL)
		return (0);
	if(!sk->dead)
		sk->state_change(sk);
	sk->dead = 1;

	if(pdata->state != SPX_CLOSED)
	{
		spx_transmit(sk, NULL, DISCON, 0);
		spx_close_socket(sk);
	}

	sock->sk	= NULL;
	sk->socket	= NULL;
	spx_destroy_socket(sk);

	return (0);
}

/* Move a socket into listening state. */
static int spx_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if(sock->state != SS_UNCONNECTED)
		return (-EINVAL);
	if(sock->type != SOCK_SEQPACKET)
		return (-EOPNOTSUPP);
	if(sk->zapped != 0)
		return (-EAGAIN);	/* not bound yet */

	sk->max_ack_backlog = backlog;
	if(sk->state != TCP_LISTEN)
	{
		sk->ack_backlog = 0;
		sk->state = TCP_LISTEN;
	}
	sk->socket->flags |= __SO_ACCEPTCON;

	return (0);
}

/* Accept a pending SPX connection: blocks (unless O_NONBLOCK) until a
 * ConReq skb is queued on the listener, then ACKs it and hands the
 * embedded new sock to newsock. */
static int spx_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk;
	struct sock *newsk;
	struct sk_buff *skb;
	int err;

	if(sock->sk == NULL)
		return (-EINVAL);
	sk = sock->sk;

	if((sock->state != SS_UNCONNECTED) || !(sock->flags & __SO_ACCEPTCON))
		return (-EINVAL);
	if(sock->type != SOCK_SEQPACKET)
		return (-EOPNOTSUPP);
	if(sk->state != TCP_LISTEN)
		return (-EINVAL);

	cli();
	do {
		skb = skb_dequeue(&sk->receive_queue);
		if(skb == NULL)
		{
			if(flags & O_NONBLOCK)
			{
				sti();
				return (-EWOULDBLOCK);
			}
			interruptible_sleep_on(sk->sleep);
			if(signal_pending(current))
			{
				sti();
				return (-ERESTARTSYS);
			}
		}
	} while (skb == NULL);

	newsk = skb->sk;
	newsk->pair = NULL;
	sti();

	err = spx_transmit(newsk, skb, CONACK, 0);	/* Connection ACK */
	if(err)
		return (err);

	/* Now attach up the new socket */
	sock->sk	= NULL;
	sk->ack_backlog--;
	newsock->sk	= newsk;

	newsk->state		= TCP_ESTABLISHED;
	newsk->protinfo.af_ipx.dest_addr = newsk->tp_pinfo.af_spx.dest_addr;

	return (0);
}

/* Build a connection to an SPX socket: bind source address from the IPX
 * layer, do the IPX connect, send a ConReq and block for the reply. */
static int spx_connect(struct socket *sock, struct sockaddr *uaddr,
		int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	struct sockaddr_ipx src;
	struct sk_buff *skb;
	int size, err;

	size = sizeof(src);
	err  = ipx_operations->getname(sock, (struct sockaddr *)&src, &size, 0);
	if(err)
		return (err);

	pdata->source_addr.net	= src.sipx_network;
	memcpy(pdata->source_addr.node, src.sipx_node, IPX_NODE_LEN);
	pdata->source_addr.sock	= (unsigned short)src.sipx_port;

	err = ipx_operations->connect(sock, uaddr, addr_len, flags);
	if(err)
		return (err);

	pdata->dest_addr = sk->protinfo.af_ipx.dest_addr;
	pdata->state	= SPX_CONNECTING;
	sock->state	= SS_CONNECTING;
	sk->state	= TCP_SYN_SENT;

	/* Send Connection request */
	err = spx_transmit(sk, NULL, CONREQ, 0);
	if(err)
		return (err);

	cli();
	do {
		skb = skb_dequeue(&sk->receive_queue);
		if(skb == NULL)
		{
			if(flags & O_NONBLOCK)
			{
				sti();
				return (-EWOULDBLOCK);
			}
			interruptible_sleep_on(sk->sleep);
			if(signal_pending(current))
			{
				sti();
				return (-ERESTARTSYS);
			}
		}
	} while (skb == NULL);

	/* watchdog fired and aborted us while we slept */
	if(pdata->state == SPX_CLOSED)
	{
		sti();
		del_timer(&pdata->watchdog);
		return (-ETIMEDOUT);
	}

	sock->state	= SS_CONNECTED;
	sk->state	= TCP_ESTABLISHED;
	kfree_skb(skb);
	sti();

	return (0);
}

/*
 * Calculate the timeout for a packet. Thankfully SPX has a large
 * fudge factor (3/4 secs) and does not pay much attention to RTT.
 * As we simply have a default retry time of 1*HZ and a max retry
 * time of 5*HZ. Between those values we increase the timeout based
 * on the number of retransmit tries.
 *
 * FixMe: This is quite fake, but will work for now. (JS)
 */
static inline unsigned long spx_calc_rtt(int tries)
{
	if(tries < 1)
		return (RETRY_TIME);
	if(tries > 5)
		return (MAX_RETRY_DELAY);
	return (tries * HZ);
}

/* Queue/route one skb.  CONREQ/DATA are held back while something is
 * awaiting retransmission; TQUEUE (and fall-through from CONREQ/DATA)
 * arms the retransmit timer and clones the skb onto the retransmit
 * queue before everything falls through to the actual IPX send. */
static int spx_route_skb(struct spx_opt *pdata, struct sk_buff *skb, int type)
{
	struct sk_buff *skb2;
	int err = 0;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if(skb == NULL)
		return (-ENOBUFS);

	switch(type)
	{
		case (CONREQ):
		case (DATA):
			if(!skb_queue_empty(&pdata->retransmit_queue))
			{
				skb_queue_tail(&pdata->transmit_queue, skb);
				return 0;
			}

		/* fall through */
		case (TQUEUE):
			pdata->retransmit.expires = jiffies + spx_calc_rtt(0);
			add_timer(&pdata->retransmit);

			skb2 = skb_clone(skb, GFP_NOIO);
			if(skb2 == NULL)
				return -ENOBUFS;
			skb_queue_tail(&pdata->retransmit_queue, skb2);

		/* fall through */
		case (ACK):
		case (CONACK):
		case (WDREQ):
		case (WDACK):
		case (DISCON):
		case (DISACK):
		case (RETRAN):
		default:
			/* Send data */
			err = ipxrtr_route_skb(skb);
			if(err)
				kfree_skb(skb);
	}

	return (err);
}

/* SPX packet transmit engine: allocates an skb if none is supplied,
 * fills in the combined IPX+SPX header for the given packet type, kicks
 * the watchdog, and hands the skb to spx_route_skb(). */
static int spx_transmit(struct sock *sk, struct sk_buff *skb, int type, int len)
{
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	struct ipxspxhdr *ipxh;
	unsigned long flags;
	int err;

	if(skb == NULL)
	{
		int offset	= ipx_if_offset(pdata->dest_addr.net);
		int size	= offset + sizeof(struct ipxspxhdr);

		if (offset < 0) /* ENETUNREACH */
			return(-ENETUNREACH);

		save_flags(flags);
		cli();
		skb = sock_alloc_send_skb(sk, size, 0, &err);
		if(skb == NULL) {
			restore_flags(flags);
			return (-ENOMEM);
		}
		skb_reserve(skb, offset);
		skb->h.raw = skb->nh.raw = skb_put(skb,sizeof(struct ipxspxhdr));
		restore_flags(flags);
	}

	/* IPX header */
	ipxh = (struct ipxspxhdr *)skb->nh.raw;
	ipxh->ipx.ipx_checksum	= 0xFFFF;	/* no checksum */
	ipxh->ipx.ipx_pktsize	= htons(SPX_SYS_PKT_LEN);
	ipxh->ipx.ipx_tctrl	= 0;
	ipxh->ipx.ipx_type	= IPX_TYPE_SPX;
	ipxh->ipx.ipx_dest	= pdata->dest_addr;
	ipxh->ipx.ipx_source	= pdata->source_addr;

	/* SPX header */
	ipxh->spx.dtype		= 0;
	ipxh->spx.sequence	= htons(pdata->sequence);
	ipxh->spx.ackseq	= htons(pdata->rmt_seq);
	ipxh->spx.sconn		= pdata->source_connid;
	ipxh->spx.dconn		= pdata->dest_connid;
	ipxh->spx.allocseq	= htons(pdata->alloc);

	/* Reset/Set WD timer */
	mod_timer(&pdata->watchdog, jiffies+VERIFY_TIMEOUT);

	switch(type)
	{
		case (DATA):	/* Data */
			ipxh->ipx.ipx_pktsize	= htons(SPX_SYS_PKT_LEN + len);
			ipxh->spx.cctl		= (CCTL_ACK | CCTL_EOM);
			pdata->sequence++;
			break;

		case (ACK):	/* ACK */
			pdata->rmt_seq++;
		/* fall through */
		case (WDACK):	/* WD ACK */
		case (CONACK):	/* Connection ACK */
			ipxh->spx.cctl	= CCTL_SYS;
			ipxh->spx.ackseq = htons(pdata->rmt_seq);
			break;

		case (CONREQ):	/* Connection Request */
			del_timer(&pdata->watchdog);
		/* fall through */
		case (WDREQ):	/* WD Request */
			pdata->source_connid	= htons(connids++);
			pdata->dest_connid	= 0xFFFF;	/* unknown yet */
			pdata->alloc = 3 + pdata->rmt_seq;
			ipxh->spx.cctl		= (CCTL_ACK | CCTL_SYS);
			ipxh->spx.sconn		= pdata->source_connid;
			ipxh->spx.dconn		= pdata->dest_connid;
			ipxh->spx.allocseq	= htons(pdata->alloc);
			break;

		case (DISCON):	/* Informed Disconnect */
			ipxh->spx.cctl	= CCTL_ACK;
			ipxh->spx.dtype = SPX_DTYPE_ECONN;
			break;

		case (DISACK):	/* Informed Disconnect ACK */
			ipxh->spx.cctl		= 0;
			ipxh->spx.dtype		= SPX_DTYPE_ECACK;
			ipxh->spx.sequence	= 0;
			ipxh->spx.ackseq	= htons(pdata->rmt_seq++);
			break;

		default:
			return (-EOPNOTSUPP);
	}

	/* Send data */
	return (spx_route_skb(pdata, skb, type));
}

/* Check the state of the connection and send a WD request if needed.
 * Aborts the connection unilaterally after max_retries silent periods. */
static void spx_watchdog(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;

	del_timer(&pdata->watchdog);
	if(pdata->state == SPX_CLOSED)
		return;
	if(pdata->retries > pdata->max_retries)
	{
		spx_close_socket(sk);	/* Unilateral Abort */
		return;
	}

	/* Send WD request */
	spx_transmit(sk, NULL, WDREQ, 0);
	pdata->retries++;

	return;
}

/* Retransmit timer: clone/copy the head of the retransmit queue (it must
 * stay queued until ACKed) and resend it with a backed-off timeout. */
static void spx_retransmit(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	struct sk_buff *skb;
	unsigned long flags;
	int err;

	del_timer(&pdata->retransmit);
	if(pdata->state == SPX_CLOSED)
		return;
	if(pdata->retransmits > RETRY_COUNT)
	{
		spx_close_socket(sk);	/* Unilateral Abort */
		return;
	}

	/* Need to leave skb on the queue, aye the fear */
	save_flags(flags);
	cli();
	skb = skb_peek(&pdata->retransmit_queue);
	if(skb_cloned(skb))
		skb = skb_copy(skb, GFP_ATOMIC);
	else
		skb = skb_clone(skb, GFP_ATOMIC);
	restore_flags(flags);

	pdata->retransmit.expires = jiffies + spx_calc_rtt(pdata->retransmits);
	add_timer(&pdata->retransmit);

	err = spx_route_skb(pdata, skb, RETRAN);
	pdata->retransmits++;

	return;
}

/* Check packet for retransmission, ConReqAck aware.
 * On a matching ACK/CONACK: free the head of the retransmit queue, stop
 * the timer, and release the next deferred packet from transmit_queue.
 * Otherwise the skb goes back on the head of the queue and -1 is
 * returned.  NOTE(review): the ACK sequence test mixes ntohs() with
 * htons() on the two operands — confirm against the wire format. */
static int spx_retransmit_chk(struct spx_opt *pdata, int ackseq, int type)
{
	struct ipxspxhdr *ipxh;
	struct sk_buff *skb;

	skb = skb_dequeue(&pdata->retransmit_queue);
	if(!skb)
		return (-ENOENT);

	/* Check Data/ACK seq */
	switch(type)
	{
		case ACK:	/* Check Sequence, Should == 1 */
			ipxh = (struct ipxspxhdr *)skb->nh.raw;
			if(!(ntohs(ipxh->spx.sequence) - htons(ackseq)))
				break;
		/* fall through */
		case CONACK:
			del_timer(&pdata->retransmit);
			pdata->retransmits = 0;
			kfree_skb(skb);
			if(skb_queue_empty(&pdata->retransmit_queue))
			{
				skb = skb_dequeue(&pdata->transmit_queue);
				if(skb != NULL)
					spx_route_skb(pdata, skb, TQUEUE);
			}
			return (0);
	}

	skb_queue_head(&pdata->retransmit_queue, skb);
	return (-1);
}

/* SPX packet receive engine: validates the IPX/SPX header, refreshes the
 * watchdog, then dispatches on the connection-control bits — connection
 * request/WD request, ConReq ACK / plain ACK, informed disconnect, and
 * finally in-sequence data delivery to rcv_queue. */
void spx_rcv(struct sock *sk, int bytes)
{
	struct sk_buff *skb;
	struct ipxspxhdr *ipxh;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;

	skb = skb_dequeue(&sk->receive_queue);
	if(skb == NULL)
		return;
	ipxh = (struct ipxspxhdr *)skb->nh.raw;

	/* Can't receive on a closed connection */
	if((pdata->state == SPX_CLOSED) && (ipxh->spx.sequence != 0))
		goto toss_skb;
	if(ntohs(ipxh->ipx.ipx_pktsize) < SPX_SYS_PKT_LEN)
		goto toss_skb;
	if(ipxh->ipx.ipx_type != IPX_TYPE_SPX)
		goto toss_skb;
	if(ntohs(ipxh->spx.ackseq) > pdata->sequence)
		goto toss_skb;

	/* Reset WD timer on any received packet */
	del_timer(&pdata->watchdog);
	pdata->retries = 0;
	pdata->watchdog.expires = jiffies + ABORT_TIMEOUT;
	add_timer(&pdata->watchdog);

	switch(ipxh->spx.cctl)
	{
		case (CCTL_SYS | CCTL_ACK):
			if((ipxh->spx.sequence == 0)	/* ConReq */
				&& (ipxh->spx.ackseq == 0)
				&& (ipxh->spx.dconn == 0xFFFF))
			{
				pdata->state		= SPX_CONNECTED;
				pdata->dest_addr	= ipxh->ipx.ipx_source;
				pdata->source_addr	= ipxh->ipx.ipx_dest;
				pdata->dest_connid	= ipxh->spx.sconn;
				pdata->alloc = 3 + ntohs(ipxh->spx.sequence);

				/* queued for spx_accept() to pick up */
				skb_queue_tail(&sk->receive_queue, skb);
				wake_up_interruptible(sk->sleep);
			}
			else	/* WD Request */
				spx_transmit(sk, skb, WDACK, 0);
			goto finish;

		case CCTL_SYS:	/* ACK */
			if((ipxh->spx.dtype == 0)	/* ConReq ACK */
				&& (ipxh->spx.sconn != 0xFFFF)
				&& (ipxh->spx.dconn != 0xFFFF)
				&& (ipxh->spx.sequence == 0)
				&& (ipxh->spx.ackseq == 0)
				&& (pdata->state != SPX_CONNECTED))
			{
				pdata->state	= SPX_CONNECTED;
				pdata->dest_connid = ipxh->spx.sconn;

				if(spx_retransmit_chk(pdata, 0, CONACK) < 0)
					goto toss_skb;

				/* queued for spx_connect() to pick up */
				skb_queue_tail(&sk->receive_queue, skb);
				wake_up_interruptible(sk->sleep);
				goto finish;
			}
			spx_retransmit_chk(pdata, ipxh->spx.ackseq, ACK);
			goto toss_skb;

		case (CCTL_ACK):
			/* Informed Disconnect */
			if(ipxh->spx.dtype == SPX_DTYPE_ECONN)
			{
				spx_transmit(sk, skb, DISACK, 0);
				spx_close_socket(sk);
				goto finish;
			}
			/* Fall through */

		default:
			/* in-sequence data: deliver to the SPX rcv queue */
			if(ntohs(ipxh->spx.sequence) == pdata->rmt_seq)
			{
				pdata->rmt_seq = ntohs(ipxh->spx.sequence);
				pdata->rmt_ack = ntohs(ipxh->spx.ackseq);
				pdata->alloc   = pdata->rmt_seq + 3;
				/* NOTE(review): this condition is always
				 * true for any value of rmt_ack */
				if(pdata->rmt_ack > 0 || pdata->rmt_ack == 0)
					spx_retransmit_chk(pdata,pdata->rmt_ack, ACK);

				skb_queue_tail(&pdata->rcv_queue, skb);
				wake_up_interruptible(sk->sleep);
				if(ipxh->spx.cctl&CCTL_ACK)
					spx_transmit(sk, NULL, ACK, 0);
				goto finish;
			}

			/* Informed Disconnect ACK */
			if(ipxh->spx.dtype == SPX_DTYPE_ECACK)
			{
				if(pdata->state != SPX_CLOSED)
					spx_close_socket(sk);
				goto toss_skb;
			}
	}

toss_skb:	/* Catch All */
	kfree_skb(skb);
finish:
	return;
}

/* Get message/packet data from user-land: copy at most 534 bytes of
 * payload behind a fresh IPX+SPX header and transmit it as DATA. */
static int spx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
			struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	int flags = msg->msg_flags;
	struct sk_buff *skb;
	int err, offset, size;

	if(len > 534)
		return (-EMSGSIZE);	/* SPX max data payload */
	if(sk->zapped)
		return (-ENOTCONN); /* Socket not bound */
	if(flags&~MSG_DONTWAIT)
		return (-EINVAL);

	offset	= ipx_if_offset(sk->tp_pinfo.af_spx.dest_addr.net);
	size	= offset + sizeof(struct ipxspxhdr) + len;

	cli();
	skb	= sock_alloc_send_skb(sk, size, flags&MSG_DONTWAIT, &err);
	sti();
	if(skb == NULL)
		return (err);

	skb->sk = sk;
	skb_reserve(skb, offset);
	skb->h.raw = skb->nh.raw = skb_put(skb, sizeof(struct ipxspxhdr));

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if(err)
	{
		kfree_skb(skb);
		return (-EFAULT);
	}

	err = spx_transmit(sk, skb, DATA, len);
	if(err)
		return (-EAGAIN);

	return (len);
}

/* Send message/packet data to user-land: block until a packet is on the
 * SPX rcv_queue, copy the payload out and fill in the source address.
 * NOTE(review): save_flags(flags) below reuses the int `flags`
 * *parameter* as irq-flags storage, clobbering MSG_DONTWAIT for later
 * loop iterations; early returns inside the wait loop also leave the
 * socket locked.  Flagged, not fixed, to keep this audit byte-exact. */
static int spx_recvmsg(struct socket *sock, struct msghdr *msg, int size,
			int flags, struct scm_cookie *scm)
{
	struct sk_buff *skb;
	struct ipxspxhdr *ispxh;
	struct sock *sk = sock->sk;
	struct spx_opt *pdata = &sk->tp_pinfo.af_spx;
	struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
	int copied, err;

	if(sk->zapped)
		return (-ENOTCONN); /* Socket not bound */

	lock_sock(sk);
restart:
	while(skb_queue_empty(&pdata->rcv_queue))	/* No data */
	{
		/* Socket errors? */
		err = sock_error(sk);
		if(err)
			return (err);

		/* Socket shut down? */
		if(sk->shutdown & RCV_SHUTDOWN)
			return (-ESHUTDOWN);

		/* handle signals */
		if(signal_pending(current))
			return (-ERESTARTSYS);

		/* User doesn't want to wait */
		if(flags&MSG_DONTWAIT)
			return (-EAGAIN);

		release_sock(sk);
		save_flags(flags);
		cli();
		if(skb_peek(&pdata->rcv_queue) == NULL)
			interruptible_sleep_on(sk->sleep);
		restore_flags(flags);
		lock_sock(sk);
	}

	skb = skb_dequeue(&pdata->rcv_queue);
	if(skb == NULL)
		goto restart;

	ispxh	= (struct ipxspxhdr *)skb->nh.raw;
	copied	= ntohs(ispxh->ipx.ipx_pktsize) - SPX_SYS_PKT_LEN;
	if(copied > size)
	{
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = memcpy_toiovec(msg->msg_iov, skb->nh.raw+SPX_SYS_PKT_LEN, copied);
	if(err)
		return (-EFAULT);

	msg->msg_namelen = sizeof(*sipx);
	if(sipx)
	{
		sipx->sipx_family	= AF_IPX;
		sipx->sipx_port		= ispxh->ipx.ipx_source.sock;
		memcpy(sipx->sipx_node,ispxh->ipx.ipx_source.node,IPX_NODE_LEN);
		sipx->sipx_network	= ispxh->ipx.ipx_source.net;
		sipx->sipx_type 	= ispxh->ipx.ipx_type;
	}
	kfree_skb(skb);
	release_sock(sk);

	return (copied);
}

/*
 * Functions which just wrap their IPX cousins
 */
static int spx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	int err;
	err = ipx_operations->bind(sock, uaddr, addr_len);
	return (err);
}

static int spx_getname (struct socket *sock, struct sockaddr *uaddr,
			 int *usockaddr_len, int peer)
{
	int err;
	err = ipx_operations->getname(sock, uaddr, usockaddr_len, peer);
	return (err);
}

static int spx_ioctl (struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;
	err = ipx_operations->ioctl(sock, cmd, arg);
	return (err);
}

static int spx_setsockopt(struct socket *sock, int level, int optname,
			 char *optval, int optlen)
{
	int err;
	err = ipx_operations->setsockopt(sock, level, optname, optval, optlen);
	return (err);
}

static int spx_getsockopt(struct socket *sock, int level, int optname,
			 char *optval, int *optlen)
{
	int err;
	err = ipx_operations->getsockopt(sock, level, optname, optval, optlen);
	return (err);
}

static struct proto_ops SOCKOPS_WRAPPED(spx_ops) = {
	family:		PF_IPX,

	release:	spx_release,
	bind:		spx_bind,
	connect:	spx_connect,
	socketpair:	sock_no_socketpair,
	accept:		spx_accept,
	getname:	spx_getname,
	poll:		spx_datagram_poll,
	ioctl:		spx_ioctl,
	listen:		spx_listen,
	shutdown:	sock_no_shutdown,
	setsockopt:	spx_setsockopt,
	getsockopt:	spx_getsockopt,
	sendmsg:	spx_sendmsg,
	recvmsg:	spx_recvmsg,
	mmap:		sock_no_mmap,
	sendpage:	sock_no_sendpage,
};

#include <linux/smp_lock.h>
SOCKOPS_WRAP(spx, PF_IPX);

static struct net_proto_family spx_family_ops = {
	family:		PF_IPX,
	create:		spx_create,
};

static char banner[] __initdata = KERN_INFO "NET4: Sequenced Packet eXchange (SPX) 0.02 for Linux NET4.0\n";

static int __init spx_proto_init(void)
{
	int error;

	connids = (__u16)jiffies;	/* initialize random */

	error = ipx_register_spx(&ipx_operations, &spx_family_ops);
        if (error)
                printk(KERN_ERR "SPX: unable to register with IPX.\n");

	/* route socket(PF_IPX, SOCK_SEQPACKET) calls through spx_create() */

	printk(banner);
	return 0;
}
module_init(spx_proto_init);

static void __exit spx_proto_finito(void)
{
	ipx_unregister_spx();
	return;
}
module_exit(spx_proto_finito);
gpl-2.0
XianB/u-boot-yaffs2old
cpu/mpc8260/cpu_init.c
25
7621
/* * (C) Copyright 2000-2002 * Wolfgang Denk, DENX Software Engineering, wd@denx.de. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <mpc8260.h> #include <asm/cpm_8260.h> #include <ioports.h> DECLARE_GLOBAL_DATA_PTR; #if defined(CONFIG_BOARD_GET_CPU_CLK_F) extern unsigned long board_get_cpu_clk_f (void); #endif static void config_8260_ioports (volatile immap_t * immr) { int portnum; for (portnum = 0; portnum < 4; portnum++) { uint pmsk = 0, ppar = 0, psor = 0, pdir = 0, podr = 0, pdat = 0; iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0]; iop_conf_t *eiopc = iopc + 32; uint msk = 1; /* * NOTE: * index 0 refers to pin 31, * index 31 refers to pin 0 */ while (iopc < eiopc) { if (iopc->conf) { pmsk |= msk; if (iopc->ppar) ppar |= msk; if (iopc->psor) psor |= msk; if (iopc->pdir) pdir |= msk; if (iopc->podr) podr |= msk; if (iopc->pdat) pdat |= msk; } msk <<= 1; iopc++; } if (pmsk != 0) { volatile ioport_t *iop = ioport_addr (immr, portnum); uint tpmsk = ~pmsk; /* * the (somewhat confused) paragraph at the * bottom of page 35-5 warns that there might * be "unknown behaviour" when programming * PSORx and PDIRx, if PPARx = 1, so I * decided this meant I had to disable the * dedicated function first, and enable it 
* last. */ iop->ppar &= tpmsk; iop->psor = (iop->psor & tpmsk) | psor; iop->podr = (iop->podr & tpmsk) | podr; iop->pdat = (iop->pdat & tpmsk) | pdat; iop->pdir = (iop->pdir & tpmsk) | pdir; iop->ppar |= ppar; } } } #define SET_VAL_MASK(a, b, mask) ((a & mask) | (b & ~mask)) /* * Breath some life into the CPU... * * Set up the memory map, * initialize a bunch of registers, * initialize the UPM's */ void cpu_init_f (volatile immap_t * immr) { #if !defined(CONFIG_COGENT) /* done in start.S for the cogent */ uint sccr; #endif #if defined(CONFIG_BOARD_GET_CPU_CLK_F) unsigned long cpu_clk; #endif volatile memctl8260_t *memctl = &immr->im_memctl; extern void m8260_cpm_reset (void); /* Pointer is writable since we allocated a register for it */ gd = (gd_t *) (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET); /* Clear initial global data */ memset ((void *) gd, 0, sizeof (gd_t)); /* RSR - Reset Status Register - clear all status (5-4) */ gd->reset_status = immr->im_clkrst.car_rsr; immr->im_clkrst.car_rsr = RSR_ALLBITS; /* RMR - Reset Mode Register - contains checkstop reset enable (5-5) */ immr->im_clkrst.car_rmr = CFG_RMR; /* BCR - Bus Configuration Register (4-25) */ #if defined(CFG_BCR_60x) && (CFG_BCR_SINGLE) if (immr->im_siu_conf.sc_bcr & BCR_EBM) { immr->im_siu_conf.sc_bcr = SET_VAL_MASK(immr->im_siu_conf.sc_bcr, CFG_BCR_60x, 0x80000010); } else { immr->im_siu_conf.sc_bcr = SET_VAL_MASK(immr->im_siu_conf.sc_bcr, CFG_BCR_SINGLE, 0x80000010); } #else immr->im_siu_conf.sc_bcr = CFG_BCR; #endif /* SIUMCR - contains debug pin configuration (4-31) */ #if defined(CFG_SIUMCR_LOW) && (CFG_SIUMCR_HIGH) cpu_clk = board_get_cpu_clk_f (); if (cpu_clk >= 100000000) { immr->im_siu_conf.sc_siumcr = SET_VAL_MASK(immr->im_siu_conf.sc_siumcr, CFG_SIUMCR_HIGH, 0x9f3cc000); } else { immr->im_siu_conf.sc_siumcr = SET_VAL_MASK(immr->im_siu_conf.sc_siumcr, CFG_SIUMCR_LOW, 0x9f3cc000); } #else immr->im_siu_conf.sc_siumcr = CFG_SIUMCR; #endif config_8260_ioports (immr); /* initialize time counter 
status and control register (4-40) */ immr->im_sit.sit_tmcntsc = CFG_TMCNTSC; /* initialize the PIT (4-42) */ immr->im_sit.sit_piscr = CFG_PISCR; #if !defined(CONFIG_COGENT) /* done in start.S for the cogent */ /* System clock control register (9-8) */ sccr = immr->im_clkrst.car_sccr & (SCCR_PCI_MODE | SCCR_PCI_MODCK | SCCR_PCIDF_MSK); immr->im_clkrst.car_sccr = sccr | (CFG_SCCR & ~(SCCR_PCI_MODE | SCCR_PCI_MODCK | SCCR_PCIDF_MSK) ); #endif /* !CONFIG_COGENT */ /* * Memory Controller: */ /* Map banks 0 and 1 to the FLASH banks 0 and 1 at preliminary * addresses - these have to be modified later when FLASH size * has been determined */ #if defined(CFG_OR0_REMAP) memctl->memc_or0 = CFG_OR0_REMAP; #endif #if defined(CFG_OR1_REMAP) memctl->memc_or1 = CFG_OR1_REMAP; #endif /* now restrict to preliminary range */ /* the PS came from the HRCW, don´t change it */ memctl->memc_br0 = SET_VAL_MASK(memctl->memc_br0 , CFG_BR0_PRELIM, BRx_PS_MSK); memctl->memc_or0 = CFG_OR0_PRELIM; #if defined(CFG_BR1_PRELIM) && defined(CFG_OR1_PRELIM) memctl->memc_or1 = CFG_OR1_PRELIM; memctl->memc_br1 = CFG_BR1_PRELIM; #endif #if defined(CFG_BR2_PRELIM) && defined(CFG_OR2_PRELIM) memctl->memc_or2 = CFG_OR2_PRELIM; memctl->memc_br2 = CFG_BR2_PRELIM; #endif #if defined(CFG_BR3_PRELIM) && defined(CFG_OR3_PRELIM) memctl->memc_or3 = CFG_OR3_PRELIM; memctl->memc_br3 = CFG_BR3_PRELIM; #endif #if defined(CFG_BR4_PRELIM) && defined(CFG_OR4_PRELIM) memctl->memc_or4 = CFG_OR4_PRELIM; memctl->memc_br4 = CFG_BR4_PRELIM; #endif #if defined(CFG_BR5_PRELIM) && defined(CFG_OR5_PRELIM) memctl->memc_or5 = CFG_OR5_PRELIM; memctl->memc_br5 = CFG_BR5_PRELIM; #endif #if defined(CFG_BR6_PRELIM) && defined(CFG_OR6_PRELIM) memctl->memc_or6 = CFG_OR6_PRELIM; memctl->memc_br6 = CFG_BR6_PRELIM; #endif #if defined(CFG_BR7_PRELIM) && defined(CFG_OR7_PRELIM) memctl->memc_or7 = CFG_OR7_PRELIM; memctl->memc_br7 = CFG_BR7_PRELIM; #endif #if defined(CFG_BR8_PRELIM) && defined(CFG_OR8_PRELIM) memctl->memc_or8 = CFG_OR8_PRELIM; 
memctl->memc_br8 = CFG_BR8_PRELIM; #endif #if defined(CFG_BR9_PRELIM) && defined(CFG_OR9_PRELIM) memctl->memc_or9 = CFG_OR9_PRELIM; memctl->memc_br9 = CFG_BR9_PRELIM; #endif #if defined(CFG_BR10_PRELIM) && defined(CFG_OR10_PRELIM) memctl->memc_or10 = CFG_OR10_PRELIM; memctl->memc_br10 = CFG_BR10_PRELIM; #endif #if defined(CFG_BR11_PRELIM) && defined(CFG_OR11_PRELIM) memctl->memc_or11 = CFG_OR11_PRELIM; memctl->memc_br11 = CFG_BR11_PRELIM; #endif m8260_cpm_reset (); } /* * initialize higher level parts of CPU like time base and timers */ int cpu_init_r (void) { volatile immap_t *immr = (immap_t *) gd->bd->bi_immr_base; immr->im_cpm.cp_rccr = CFG_RCCR; return (0); } /* * print out the reason for the reset */ int prt_8260_rsr (void) { static struct { ulong mask; char *desc; } bits[] = { { RSR_JTRS, "JTAG"}, { RSR_CSRS, "Check Stop"}, { RSR_SWRS, "Software Watchdog"}, { RSR_BMRS, "Bus Monitor"}, { RSR_ESRS, "External Soft"}, { RSR_EHRS, "External Hard"} }; static int n = sizeof bits / sizeof bits[0]; ulong rsr = gd->reset_status; int i; char *sep; puts (CPU_ID_STR " Reset Status:"); sep = " "; for (i = 0; i < n; i++) if (rsr & bits[i].mask) { printf ("%s%s", sep, bits[i].desc); sep = ", "; } puts ("\n\n"); return (0); }
gpl-2.0
duowing/HTC10
arch/x86/kvm/pmu.c
537
14793
/* * Kernel-based Virtual Machine -- Performance Monitoring Unit support * * Copyright 2011 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@redhat.com> * Gleb Natapov <gleb@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/types.h> #include <linux/kvm_host.h> #include <linux/perf_event.h> #include <asm/perf_event.h> #include "x86.h" #include "cpuid.h" #include "lapic.h" static struct kvm_arch_event_perf_mapping { u8 eventsel; u8 unit_mask; unsigned event_type; bool inexact; } arch_events[] = { /* Index must match CPUID 0x0A.EBX bit vector */ [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES }, [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES }, [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES }, [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, }; /* mapping between fixed pmc index and arch_events array */ int fixed_pmc_events[] = {1, 0, 7}; static bool pmc_is_gp(struct kvm_pmc *pmc) { return pmc->type == KVM_PMC_GP; } static inline u64 pmc_bitmask(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; return pmu->counter_bitmask[pmc->type]; } static inline bool pmc_enabled(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); } static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, u32 base) { if (msr >= base && msr < base + pmu->nr_arch_gp_counters) return &pmu->gp_counters[msr - base]; return NULL; } static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) { int base = MSR_CORE_PERF_FIXED_CTR0; if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) return &pmu->fixed_counters[msr - base]; return NULL; } static inline 
struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx) { return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx); } static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx) { if (idx < INTEL_PMC_IDX_FIXED) return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0); else return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED); } void kvm_deliver_pmi(struct kvm_vcpu *vcpu) { if (vcpu->arch.apic) kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); } static void trigger_pmi(struct irq_work *irq_work) { struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work); struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu, arch.pmu); kvm_deliver_pmi(vcpu); } static void kvm_perf_overflow(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs) { struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); } } static void kvm_perf_overflow_intr(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs) { struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) { __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); /* * Inject PMI. If vcpu was in a guest mode during NMI PMI * can be ejected on a guest mode re-entry. Otherwise we can't * be sure that vcpu wasn't executing hlt instruction at the * time of vmexit and is not going to re-enter guest mode until, * woken up. So we should wake it, but this is impossible from * NMI context. Do it from irq work instead. 
*/ if (!kvm_is_in_guest()) irq_work_queue(&pmc->vcpu->arch.pmu.irq_work); else kvm_make_request(KVM_REQ_PMI, pmc->vcpu); } } static u64 read_pmc(struct kvm_pmc *pmc) { u64 counter, enabled, running; counter = pmc->counter; if (pmc->perf_event) counter += perf_event_read_value(pmc->perf_event, &enabled, &running); /* FIXME: Scaling needed? */ return counter & pmc_bitmask(pmc); } static void stop_counter(struct kvm_pmc *pmc) { if (pmc->perf_event) { pmc->counter = read_pmc(pmc); perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; } } static void reprogram_counter(struct kvm_pmc *pmc, u32 type, unsigned config, bool exclude_user, bool exclude_kernel, bool intr, bool in_tx, bool in_tx_cp) { struct perf_event *event; struct perf_event_attr attr = { .type = type, .size = sizeof(attr), .pinned = true, .exclude_idle = true, .exclude_host = 1, .exclude_user = exclude_user, .exclude_kernel = exclude_kernel, .config = config, }; if (in_tx) attr.config |= HSW_IN_TX; if (in_tx_cp) attr.config |= HSW_IN_TX_CHECKPOINTED; attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); event = perf_event_create_kernel_counter(&attr, -1, current, intr ? 
kvm_perf_overflow_intr : kvm_perf_overflow, pmc); if (IS_ERR(event)) { printk_once("kvm: pmu event creation failed %ld\n", PTR_ERR(event)); return; } pmc->perf_event = event; clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi); } static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select, u8 unit_mask) { int i; for (i = 0; i < ARRAY_SIZE(arch_events); i++) if (arch_events[i].eventsel == event_select && arch_events[i].unit_mask == unit_mask && (pmu->available_event_types & (1 << i))) break; if (i == ARRAY_SIZE(arch_events)) return PERF_COUNT_HW_MAX; return arch_events[i].event_type; } static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { unsigned config, type = PERF_TYPE_RAW; u8 event_select, unit_mask; if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) printk_once("kvm pmu: pin control bit is ignored\n"); pmc->eventsel = eventsel; stop_counter(pmc); if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc)) return; event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED))) { config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, unit_mask); if (config != PERF_COUNT_HW_MAX) type = PERF_TYPE_HARDWARE; } if (type == PERF_TYPE_RAW) config = eventsel & X86_RAW_EVENT_MASK; reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), !(eventsel & ARCH_PERFMON_EVENTSEL_OS), eventsel & ARCH_PERFMON_EVENTSEL_INT, (eventsel & HSW_IN_TX), (eventsel & HSW_IN_TX_CHECKPOINTED)); } static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) { unsigned en = en_pmi & 0x3; bool pmi = en_pmi & 0x8; stop_counter(pmc); if (!en || !pmc_enabled(pmc)) return; reprogram_counter(pmc, PERF_TYPE_HARDWARE, arch_events[fixed_pmc_events[idx]].event_type, !(en & 0x2), /* exclude user */ !(en & 0x1), /* exclude 
kernel */ pmi, false, false); } static inline u8 fixed_en_pmi(u64 ctrl, int idx) { return (ctrl >> (idx * 4)) & 0xf; } static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) { int i; for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { u8 en_pmi = fixed_en_pmi(data, i); struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i); if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi) continue; reprogram_fixed_counter(pmc, en_pmi, i); } pmu->fixed_ctr_ctrl = data; } static void reprogram_idx(struct kvm_pmu *pmu, int idx) { struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx); if (!pmc) return; if (pmc_is_gp(pmc)) reprogram_gp_counter(pmc, pmc->eventsel); else { int fidx = idx - INTEL_PMC_IDX_FIXED; reprogram_fixed_counter(pmc, fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx); } } static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) { int bit; u64 diff = pmu->global_ctrl ^ data; pmu->global_ctrl = data; for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) reprogram_idx(pmu, bit); } bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = &vcpu->arch.pmu; int ret; switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: case MSR_CORE_PERF_GLOBAL_STATUS: case MSR_CORE_PERF_GLOBAL_CTRL: case MSR_CORE_PERF_GLOBAL_OVF_CTRL: ret = pmu->version > 1; break; default: ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || get_fixed_pmc(pmu, msr); break; } return ret; } int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmc *pmc; switch (index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: *data = pmu->fixed_ctr_ctrl; return 0; case MSR_CORE_PERF_GLOBAL_STATUS: *data = pmu->global_status; return 0; case MSR_CORE_PERF_GLOBAL_CTRL: *data = pmu->global_ctrl; return 0; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: *data = pmu->global_ovf_ctrl; return 0; default: if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || (pmc = get_fixed_pmc(pmu, index))) { *data = read_pmc(pmc); 
return 0; } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { *data = pmc->eventsel; return 0; } } return 1; } int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmc *pmc; u32 index = msr_info->index; u64 data = msr_info->data; switch (index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: if (pmu->fixed_ctr_ctrl == data) return 0; if (!(data & 0xfffffffffffff444ull)) { reprogram_fixed_counters(pmu, data); return 0; } break; case MSR_CORE_PERF_GLOBAL_STATUS: if (msr_info->host_initiated) { pmu->global_status = data; return 0; } break; /* RO MSR */ case MSR_CORE_PERF_GLOBAL_CTRL: if (pmu->global_ctrl == data) return 0; if (!(data & pmu->global_ctrl_mask)) { global_ctrl_changed(pmu, data); return 0; } break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) { if (!msr_info->host_initiated) pmu->global_status &= ~data; pmu->global_ovf_ctrl = data; return 0; } break; default: if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || (pmc = get_fixed_pmc(pmu, index))) { if (!msr_info->host_initiated) data = (s64)(s32)data; pmc->counter += data - read_pmc(pmc); return 0; } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) return 0; if (!(data & pmu->reserved_bits)) { reprogram_gp_counter(pmc, data); return 0; } } } return 1; } int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc) { struct kvm_pmu *pmu = &vcpu->arch.pmu; bool fixed = pmc & (1u << 30); pmc &= ~(3u << 30); return (!fixed && pmc >= pmu->nr_arch_gp_counters) || (fixed && pmc >= pmu->nr_arch_fixed_counters); } int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) { struct kvm_pmu *pmu = &vcpu->arch.pmu; bool fast_mode = pmc & (1u << 31); bool fixed = pmc & (1u << 30); struct kvm_pmc *counters; u64 ctr; pmc &= ~(3u << 30); if (!fixed && pmc >= pmu->nr_arch_gp_counters) return 1; if (fixed && pmc >= pmu->nr_arch_fixed_counters) return 1; counters = 
fixed ? pmu->fixed_counters : pmu->gp_counters; ctr = read_pmc(&counters[pmc]); if (fast_mode) ctr = (u32)ctr; *data = ctr; return 0; } void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_cpuid_entry2 *entry; union cpuid10_eax eax; union cpuid10_edx edx; pmu->nr_arch_gp_counters = 0; pmu->nr_arch_fixed_counters = 0; pmu->counter_bitmask[KVM_PMC_GP] = 0; pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) return; eax.full = entry->eax; edx.full = entry->edx; pmu->version = eax.split.version_id; if (!pmu->version) return; pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, INTEL_PMC_MAX_GENERIC); pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; pmu->available_event_types = ~entry->ebx & ((1ull << eax.split.mask_length) - 1); if (pmu->version == 1) { pmu->nr_arch_fixed_counters = 0; } else { pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, INTEL_PMC_MAX_FIXED); pmu->counter_bitmask[KVM_PMC_FIXED] = ((u64)1 << edx.split.bit_width_fixed) - 1; } pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; entry = kvm_find_cpuid_entry(vcpu, 7, 0); if (entry && (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; } void kvm_pmu_init(struct kvm_vcpu *vcpu) { int i; struct kvm_pmu *pmu = &vcpu->arch.pmu; memset(pmu, 0, sizeof(*pmu)); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { pmu->gp_counters[i].type = KVM_PMC_GP; pmu->gp_counters[i].vcpu = vcpu; pmu->gp_counters[i].idx = i; } for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { pmu->fixed_counters[i].type = KVM_PMC_FIXED; pmu->fixed_counters[i].vcpu = vcpu; pmu->fixed_counters[i].idx = i + 
INTEL_PMC_IDX_FIXED; } init_irq_work(&pmu->irq_work, trigger_pmi); kvm_pmu_cpuid_update(vcpu); } void kvm_pmu_reset(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; int i; irq_work_sync(&pmu->irq_work); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { struct kvm_pmc *pmc = &pmu->gp_counters[i]; stop_counter(pmc); pmc->counter = pmc->eventsel = 0; } for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) stop_counter(&pmu->fixed_counters[i]); pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = pmu->global_ovf_ctrl = 0; } void kvm_pmu_destroy(struct kvm_vcpu *vcpu) { kvm_pmu_reset(vcpu); } void kvm_handle_pmu_event(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; u64 bitmask; int bit; bitmask = pmu->reprogram_pmi; for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) { struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit); if (unlikely(!pmc || !pmc->perf_event)) { clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi); continue; } reprogram_idx(pmu, bit); } }
gpl-2.0
frankiek3/android_kernel_samsung_intercept
arch/mips/alchemy/board-mtx1.c
2073
8094
/* * MTX-1 platform devices registration (Au1500) * * Copyright (C) 2007-2009, Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <mtd/mtd-abi.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_eth.h> #include <prom.h> const char *get_system_type(void) { return "MTX-1"; } void __init prom_init(void) { unsigned char *memsize_str; unsigned long memsize; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize_str = prom_getenv("memsize"); if (!memsize_str) memsize = 0x04000000; else strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } static void mtx1_reset(char *c) { /* Jump to the reset vector */ __asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000)); } static void mtx1_power_off(void) { while (1) asm volatile ( " .set mips32 \n" " wait \n" " .set mips0 \n"); } 
void __init board_setup(void) { #if IS_ENABLED(CONFIG_USB_OHCI_HCD) /* Enable USB power switch */ alchemy_gpio_direction_output(204, 0); #endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */ /* Initialize sys_pinfunc */ au_writel(SYS_PF_NI2, SYS_PINFUNC); /* Initialize GPIO */ au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */ /* Enable LED and set it to green */ alchemy_gpio_direction_output(211, 1); /* green on */ alchemy_gpio_direction_output(212, 0); /* red off */ pm_power_off = mtx1_power_off; _machine_halt = mtx1_power_off; _machine_restart = mtx1_reset; printk(KERN_INFO "4G Systems MTX-1 Board\n"); } /******************************************************************************/ static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, .code = BTN_0, .desc = "System button", } }; static struct gpio_keys_platform_data mtx1_buttons_data = { .buttons = mtx1_gpio_button, .nbuttons = ARRAY_SIZE(mtx1_gpio_button), }; static struct platform_device mtx1_button = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &mtx1_buttons_data, } }; static struct resource mtx1_wdt_res[] = { [0] = { .start = 215, .end = 215, .name = "mtx1-wdt-gpio", .flags = IORESOURCE_IRQ, } }; static struct platform_device mtx1_wdt = { .name = "mtx1-wdt", .id = 0, .num_resources = ARRAY_SIZE(mtx1_wdt_res), .resource = mtx1_wdt_res, }; static struct gpio_led default_leds[] = { { .name = "mtx1:green", .gpio = 211, }, { .name = "mtx1:red", .gpio = 212, }, }; static struct gpio_led_platform_data mtx1_led_data = { .num_leds = ARRAY_SIZE(default_leds), .leds = default_leds, }; static struct platform_device mtx1_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &mtx1_led_data, } }; static struct 
mtd_partition mtx1_mtd_partitions[] = { { .name = "filesystem", .size = 0x01C00000, .offset = 0, }, { .name = "yamon", .size = 0x00100000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE, }, { .name = "kernel", .size = 0x002c0000, .offset = MTDPART_OFS_APPEND, }, { .name = "yamon env", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, }, }; static struct physmap_flash_data mtx1_flash_data = { .width = 4, .nr_parts = 4, .parts = mtx1_mtd_partitions, }; static struct resource mtx1_mtd_resource = { .start = 0x1e000000, .end = 0x1fffffff, .flags = IORESOURCE_MEM, }; static struct platform_device mtx1_mtd = { .name = "physmap-flash", .dev = { .platform_data = &mtx1_flash_data, }, .num_resources = 1, .resource = &mtx1_mtd_resource, }; static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, .end = AU1500_PCI_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, }; static int mtx1_pci_idsel(unsigned int devsel, int assert) { /* This function is only necessary to support a proprietary Cardbus * adapter on the mtx-1 "singleboard" variant. It triggers a custom * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 
*/ udelay(1); if (assert && devsel != 0) /* Suppress signal to Cardbus */ alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ else alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */ udelay(1); return 1; } static const char mtx1_irqtab[][5] = { [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */ [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */ [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */ [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */ [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */ [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */ [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */ [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */ }; static int mtx1_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) { return mtx1_irqtab[slot][pin]; } static struct alchemy_pci_platdata mtx1_pci_pd = { .board_map_irq = mtx1_map_pci_irq, .board_pci_idsel = mtx1_pci_idsel, .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | PCI_CONFIG_CH | #if defined(__MIPSEB__) PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, #else 0, #endif }; static struct platform_device mtx1_pci_host = { .dev.platform_data = &mtx1_pci_pd, .name = "alchemy-pci", .id = 0, .num_resources = ARRAY_SIZE(alchemy_pci_host_res), .resource = alchemy_pci_host_res, }; static struct __initdata platform_device * mtx1_devs[] = { &mtx1_pci_host, &mtx1_gpio_leds, &mtx1_wdt, &mtx1_button, &mtx1_mtd, }; static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { .phy_search_highest_addr = 1, .phy1_search_mac0 = 1, }; static int __init mtx1_register_devices(void) { int rc; irq_set_irq_type(AU1500_GPIO204_INT, 
IRQ_TYPE_LEVEL_HIGH); irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW); au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { printk(KERN_INFO "mtx1: failed to request %d\n", mtx1_gpio_button[0].gpio); goto out; } gpio_direction_input(mtx1_gpio_button[0].gpio); out: return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs)); } arch_initcall(mtx1_register_devices);
gpl-2.0
edoko/Air_Kernel_for_GN
arch/mips/netlogic/xlr/setup.c
2585
5248
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/kernel.h> #include <linux/serial_8250.h> #include <linux/pm.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/bootinfo.h> #include <asm/smp-ops.h> #include <asm/netlogic/interrupt.h> #include <asm/netlogic/psb-bootinfo.h> #include <asm/netlogic/xlr/xlr.h> #include <asm/netlogic/xlr/iomap.h> #include <asm/netlogic/xlr/pic.h> #include <asm/netlogic/xlr/gpio.h> unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE); unsigned long nlm_common_ebase = 0x0; struct psb_info nlm_prom_info; static void nlm_early_serial_setup(void) { struct uart_port s; nlm_reg_t *uart_base; uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); memset(&s, 0, sizeof(s)); s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; s.iotype = UPIO_MEM32; s.regshift = 2; s.irq = PIC_UART_0_IRQ; s.uartclk = PIC_CLKS_PER_SEC; s.serial_in = nlm_xlr_uart_in; s.serial_out = nlm_xlr_uart_out; s.mapbase = (unsigned long)uart_base; s.membase = (unsigned char __iomem *)uart_base; early_serial_setup(&s); } static void nlm_linux_exit(void) { nlm_reg_t *mmio; mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET); /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */ netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1); for ( ; ; ) cpu_wait(); } void __init plat_mem_setup(void) { panic_timeout = 5; _machine_restart = (void (*)(char *))nlm_linux_exit; _machine_halt = nlm_linux_exit; pm_power_off = nlm_linux_exit; } const char *get_system_type(void) { return "Netlogic XLR/XLS Series"; } void __init prom_free_prom_memory(void) { /* Nothing yet */ } static void build_arcs_cmdline(int *argv) { int i, remain, len; char *arg; remain = sizeof(arcs_cmdline) - 1; arcs_cmdline[0] = '\0'; for (i = 0; argv[i] != 0; i++) { arg = (char *)(long)argv[i]; len = strlen(arg); if (len + 1 > remain) break; strcat(arcs_cmdline, arg); strcat(arcs_cmdline, " "); remain -= len + 1; } /* Add the default options here */ if ((strstr(arcs_cmdline, "console=")) == NULL) { arg = 
"console=ttyS0,38400 "; len = strlen(arg); if (len > remain) goto fail; strcat(arcs_cmdline, arg); remain -= len; } #ifdef CONFIG_BLK_DEV_INITRD if ((strstr(arcs_cmdline, "rdinit=")) == NULL) { arg = "rdinit=/sbin/init "; len = strlen(arg); if (len > remain) goto fail; strcat(arcs_cmdline, arg); remain -= len; } #endif return; fail: panic("Cannot add %s, command line too big!", arg); } static void prom_add_memory(void) { struct nlm_boot_mem_map *bootm; u64 start, size; u64 pref_backup = 512; /* avoid pref walking beyond end */ int i; bootm = (void *)(long)nlm_prom_info.psb_mem_map; for (i = 0; i < bootm->nr_map; i++) { if (bootm->map[i].type != BOOT_MEM_RAM) continue; start = bootm->map[i].addr; size = bootm->map[i].size; /* Work around for using bootloader mem */ if (i == 0 && start == 0 && size == 0x0c000000) size = 0x0ff00000; add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); } } void __init prom_init(void) { int *argv, *envp; /* passed as 32 bit ptrs */ struct psb_info *prom_infop; /* truncate to 32 bit and sign extend all args */ argv = (int *)(long)(int)fw_arg1; envp = (int *)(long)(int)fw_arg2; prom_infop = (struct psb_info *)(long)(int)fw_arg3; nlm_prom_info = *prom_infop; nlm_early_serial_setup(); build_arcs_cmdline(argv); nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); prom_add_memory(); #ifdef CONFIG_SMP nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); register_smp_ops(&nlm_smp_ops); #endif }
gpl-2.0
CyanogenMod/android_kernel_huawei_msm8226
drivers/rtc/rtc-ab8500.c
2841
12132
/* * Copyright (C) ST-Ericsson SA 2010 * * License terms: GNU General Public License (GPL) version 2 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com> * * RTC clock driver for the RTC part of the AB8500 Power management chip. * Based on RTC clock driver for the AB3100 Analog Baseband Chip by * Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/delay.h> #define AB8500_RTC_SOFF_STAT_REG 0x00 #define AB8500_RTC_CC_CONF_REG 0x01 #define AB8500_RTC_READ_REQ_REG 0x02 #define AB8500_RTC_WATCH_TSECMID_REG 0x03 #define AB8500_RTC_WATCH_TSECHI_REG 0x04 #define AB8500_RTC_WATCH_TMIN_LOW_REG 0x05 #define AB8500_RTC_WATCH_TMIN_MID_REG 0x06 #define AB8500_RTC_WATCH_TMIN_HI_REG 0x07 #define AB8500_RTC_ALRM_MIN_LOW_REG 0x08 #define AB8500_RTC_ALRM_MIN_MID_REG 0x09 #define AB8500_RTC_ALRM_MIN_HI_REG 0x0A #define AB8500_RTC_STAT_REG 0x0B #define AB8500_RTC_BKUP_CHG_REG 0x0C #define AB8500_RTC_FORCE_BKUP_REG 0x0D #define AB8500_RTC_CALIB_REG 0x0E #define AB8500_RTC_SWITCH_STAT_REG 0x0F /* RtcReadRequest bits */ #define RTC_READ_REQUEST 0x01 #define RTC_WRITE_REQUEST 0x02 /* RtcCtrl bits */ #define RTC_ALARM_ENA 0x04 #define RTC_STATUS_DATA 0x01 #define COUNTS_PER_SEC (0xF000 / 60) #define AB8500_RTC_EPOCH 2000 static const u8 ab8500_rtc_time_regs[] = { AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG, AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG, AB8500_RTC_WATCH_TSECMID_REG }; static const u8 ab8500_rtc_alarm_regs[] = { AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG, AB8500_RTC_ALRM_MIN_LOW_REG }; /* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ static unsigned long get_elapsed_seconds(int year) { unsigned long secs; struct rtc_time tm = { .tm_year = year - 1900, .tm_mday = 1, }; /* * This function 
calculates secs from 1970 and not from * 1900, even if we supply the offset from year 1900. */ rtc_tm_to_time(&tm, &secs); return secs; } static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long timeout = jiffies + HZ; int retval, i; unsigned long mins, secs; unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; u8 value; /* Request a data read */ retval = abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, RTC_READ_REQUEST); if (retval < 0) return retval; /* Early AB8500 chips will not clear the rtc read request bit */ if (abx500_get_chip_id(dev) == 0) { usleep_range(1000, 1000); } else { /* Wait for some cycles after enabling the rtc read in ab8500 */ while (time_before(jiffies, timeout)) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value); if (retval < 0) return retval; if (!(value & RTC_READ_REQUEST)) break; usleep_range(1000, 5000); } } /* Read the Watchtime registers */ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, ab8500_rtc_time_regs[i], &value); if (retval < 0) return retval; buf[i] = value; } mins = (buf[0] << 16) | (buf[1] << 8) | buf[2]; secs = (buf[3] << 8) | buf[4]; secs = secs / COUNTS_PER_SEC; secs = secs + (mins * 60); /* Add back the initially subtracted number of seconds */ secs += get_elapsed_seconds(AB8500_RTC_EPOCH); rtc_time_to_tm(secs, tm); return rtc_valid_tm(tm); } static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; unsigned long no_secs, no_mins, secs = 0; if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) { dev_dbg(dev, "year should be equal to or greater than %d\n", AB8500_RTC_EPOCH); return -EINVAL; } /* Get the number of seconds since 1970 */ rtc_tm_to_time(tm, &secs); /* * Convert it to the number of seconds since 01-01-2000 00:00:00, since * we only have a small counter in the RTC. 
*/ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); no_mins = secs / 60; no_secs = secs % 60; /* Make the seconds count as per the RTC resolution */ no_secs = no_secs * COUNTS_PER_SEC; buf[4] = no_secs & 0xFF; buf[3] = (no_secs >> 8) & 0xFF; buf[2] = no_mins & 0xFF; buf[1] = (no_mins >> 8) & 0xFF; buf[0] = (no_mins >> 16) & 0xFF; for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { retval = abx500_set_register_interruptible(dev, AB8500_RTC, ab8500_rtc_time_regs[i], buf[i]); if (retval < 0) return retval; } /* Request a data write */ return abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST); } static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; u8 rtc_ctrl, value; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; unsigned long secs, mins; /* Check if the alarm is enabled or not */ retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_STAT_REG, &rtc_ctrl); if (retval < 0) return retval; if (rtc_ctrl & RTC_ALARM_ENA) alarm->enabled = 1; else alarm->enabled = 0; alarm->pending = 0; for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { retval = abx500_get_register_interruptible(dev, AB8500_RTC, ab8500_rtc_alarm_regs[i], &value); if (retval < 0) return retval; buf[i] = value; } mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]); secs = mins * 60; /* Add back the initially subtracted number of seconds */ secs += get_elapsed_seconds(AB8500_RTC_EPOCH); rtc_time_to_tm(secs, &alarm->time); return rtc_valid_tm(&alarm->time); } static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled) { return abx500_mask_and_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_STAT_REG, RTC_ALARM_ENA, enabled ? 
RTC_ALARM_ENA : 0); } static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; unsigned long mins, secs = 0; if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { dev_dbg(dev, "year should be equal to or greater than %d\n", AB8500_RTC_EPOCH); return -EINVAL; } /* Get the number of seconds since 1970 */ rtc_tm_to_time(&alarm->time, &secs); /* * Convert it to the number of seconds since 01-01-2000 00:00:00, since * we only have a small counter in the RTC. */ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); mins = secs / 60; buf[2] = mins & 0xFF; buf[1] = (mins >> 8) & 0xFF; buf[0] = (mins >> 16) & 0xFF; /* Set the alarm time */ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { retval = abx500_set_register_interruptible(dev, AB8500_RTC, ab8500_rtc_alarm_regs[i], buf[i]); if (retval < 0) return retval; } return ab8500_rtc_irq_enable(dev, alarm->enabled); } static int ab8500_rtc_set_calibration(struct device *dev, int calibration) { int retval; u8 rtccal = 0; /* * Check that the calibration value (which is in units of 0.5 * parts-per-million) is in the AB8500's range for RtcCalibration * register. -128 (0x80) is not permitted because the AB8500 uses * a sign-bit rather than two's complement, so 0x80 is just another * representation of zero. */ if ((calibration < -127) || (calibration > 127)) { dev_err(dev, "RtcCalibration value outside permitted range\n"); return -EINVAL; } /* * The AB8500 uses sign (in bit7) and magnitude (in bits0-7) * so need to convert to this sort of representation before writing * into RtcCalibration register... 
*/ if (calibration >= 0) rtccal = 0x7F & calibration; else rtccal = ~(calibration - 1) | 0x80; retval = abx500_set_register_interruptible(dev, AB8500_RTC, AB8500_RTC_CALIB_REG, rtccal); return retval; } static int ab8500_rtc_get_calibration(struct device *dev, int *calibration) { int retval; u8 rtccal = 0; retval = abx500_get_register_interruptible(dev, AB8500_RTC, AB8500_RTC_CALIB_REG, &rtccal); if (retval >= 0) { /* * The AB8500 uses sign (in bit7) and magnitude (in bits0-7) * so need to convert value from RtcCalibration register into * a two's complement signed value... */ if (rtccal & 0x80) *calibration = 0 - (rtccal & 0x7F); else *calibration = 0x7F & rtccal; } return retval; } static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; int calibration = 0; if (sscanf(buf, " %i ", &calibration) != 1) { dev_err(dev, "Failed to store RTC calibration attribute\n"); return -EINVAL; } retval = ab8500_rtc_set_calibration(dev, calibration); return retval ? 
retval : count; } static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev, struct device_attribute *attr, char *buf) { int retval = 0; int calibration = 0; retval = ab8500_rtc_get_calibration(dev, &calibration); if (retval < 0) { dev_err(dev, "Failed to read RTC calibration attribute\n"); sprintf(buf, "0\n"); return retval; } return sprintf(buf, "%d\n", calibration); } static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR, ab8500_sysfs_show_rtc_calibration, ab8500_sysfs_store_rtc_calibration); static int ab8500_sysfs_rtc_register(struct device *dev) { return device_create_file(dev, &dev_attr_rtc_calibration); } static void ab8500_sysfs_rtc_unregister(struct device *dev) { device_remove_file(dev, &dev_attr_rtc_calibration); } static irqreturn_t rtc_alarm_handler(int irq, void *data) { struct rtc_device *rtc = data; unsigned long events = RTC_IRQF | RTC_AF; dev_dbg(&rtc->dev, "%s\n", __func__); rtc_update_irq(rtc, 1, events); return IRQ_HANDLED; } static const struct rtc_class_ops ab8500_rtc_ops = { .read_time = ab8500_rtc_read_time, .set_time = ab8500_rtc_set_time, .read_alarm = ab8500_rtc_read_alarm, .set_alarm = ab8500_rtc_set_alarm, .alarm_irq_enable = ab8500_rtc_irq_enable, }; static int __devinit ab8500_rtc_probe(struct platform_device *pdev) { int err; struct rtc_device *rtc; u8 rtc_ctrl; int irq; irq = platform_get_irq_byname(pdev, "ALARM"); if (irq < 0) return irq; /* For RTC supply test */ err = abx500_mask_and_set_register_interruptible(&pdev->dev, AB8500_RTC, AB8500_RTC_STAT_REG, RTC_STATUS_DATA, RTC_STATUS_DATA); if (err < 0) return err; /* Wait for reset by the PorRtc */ usleep_range(1000, 5000); err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC, AB8500_RTC_STAT_REG, &rtc_ctrl); if (err < 0) return err; /* Check if the RTC Supply fails */ if (!(rtc_ctrl & RTC_STATUS_DATA)) { dev_err(&pdev->dev, "RTC supply failure\n"); return -ENODEV; } device_init_wakeup(&pdev->dev, true); rtc = rtc_device_register("ab8500-rtc", &pdev->dev, 
&ab8500_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { dev_err(&pdev->dev, "Registration failed\n"); err = PTR_ERR(rtc); return err; } err = request_threaded_irq(irq, NULL, rtc_alarm_handler, IRQF_NO_SUSPEND, "ab8500-rtc", rtc); if (err < 0) { rtc_device_unregister(rtc); return err; } platform_set_drvdata(pdev, rtc); err = ab8500_sysfs_rtc_register(&pdev->dev); if (err) { dev_err(&pdev->dev, "sysfs RTC failed to register\n"); return err; } return 0; } static int __devexit ab8500_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); int irq = platform_get_irq_byname(pdev, "ALARM"); ab8500_sysfs_rtc_unregister(&pdev->dev); free_irq(irq, rtc); rtc_device_unregister(rtc); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver ab8500_rtc_driver = { .driver = { .name = "ab8500-rtc", .owner = THIS_MODULE, }, .probe = ab8500_rtc_probe, .remove = __devexit_p(ab8500_rtc_remove), }; module_platform_driver(ab8500_rtc_driver); MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>"); MODULE_DESCRIPTION("AB8500 RTC Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
limitedev66/android_kernel_shooteru
sound/soc/imx/mx27vis-aic32x4.c
3097
3912
/* * mx27vis-aic32x4.c * * Copyright 2011 Vista Silicon S.L. * * Author: Javier Martin <javier.martin@vista-silicon.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <asm/mach-types.h> #include <mach/audmux.h> #include "../codecs/tlv320aic32x4.h" #include "imx-ssi.h" static int mx27vis_aic32x4_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret; u32 dai_format; dai_format = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM; /* set codec DAI configuration */ snd_soc_dai_set_fmt(codec_dai, dai_format); /* set cpu DAI configuration */ snd_soc_dai_set_fmt(cpu_dai, dai_format); ret = snd_soc_dai_set_sysclk(codec_dai, 0, 25000000, SND_SOC_CLOCK_OUT); if (ret) { pr_err("%s: failed setting codec sysclk\n", __func__); return ret; } ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0, SND_SOC_CLOCK_IN); if (ret) { pr_err("can't set CPU system clock IMX_SSP_SYS_CLK\n"); return ret; } return 0; } static 
struct snd_soc_ops mx27vis_aic32x4_snd_ops = { .hw_params = mx27vis_aic32x4_hw_params, }; static struct snd_soc_dai_link mx27vis_aic32x4_dai = { .name = "tlv320aic32x4", .stream_name = "TLV320AIC32X4", .codec_dai_name = "tlv320aic32x4-hifi", .platform_name = "imx-pcm-audio.0", .codec_name = "tlv320aic32x4.0-0018", .cpu_dai_name = "imx-ssi.0", .ops = &mx27vis_aic32x4_snd_ops, }; static struct snd_soc_card mx27vis_aic32x4 = { .name = "visstrim_m10-audio", .dai_link = &mx27vis_aic32x4_dai, .num_links = 1, }; static struct platform_device *mx27vis_aic32x4_snd_device; static int __init mx27vis_aic32x4_init(void) { int ret; mx27vis_aic32x4_snd_device = platform_device_alloc("soc-audio", -1); if (!mx27vis_aic32x4_snd_device) return -ENOMEM; platform_set_drvdata(mx27vis_aic32x4_snd_device, &mx27vis_aic32x4); ret = platform_device_add(mx27vis_aic32x4_snd_device); if (ret) { printk(KERN_ERR "ASoC: Platform device allocation failed\n"); platform_device_put(mx27vis_aic32x4_snd_device); } /* Connect SSI0 as clock slave to SSI1 external pins */ mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, MXC_AUDMUX_V1_PCR_SYN | MXC_AUDMUX_V1_PCR_TFSDIR | MXC_AUDMUX_V1_PCR_TCLKDIR | MXC_AUDMUX_V1_PCR_TFCSEL(MX27_AUDMUX_PPCR1_SSI_PINS_1) | MXC_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_PPCR1_SSI_PINS_1) ); mxc_audmux_v1_configure_port(MX27_AUDMUX_PPCR1_SSI_PINS_1, MXC_AUDMUX_V1_PCR_SYN | MXC_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0) ); return ret; } static void __exit mx27vis_aic32x4_exit(void) { platform_device_unregister(mx27vis_aic32x4_snd_device); } module_init(mx27vis_aic32x4_init); module_exit(mx27vis_aic32x4_exit); MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>"); MODULE_DESCRIPTION("ALSA SoC AIC32X4 mx27 visstrim"); MODULE_LICENSE("GPL");
gpl-2.0
Noeljunior/android_kernel_samsung_tuna
drivers/acpi/event.c
3097
6966
/*
 * event.c - exporting ACPI events via procfs
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 */

#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <acpi/acpi_drivers.h>
#include <net/netlink.h>
#include <net/genetlink.h>

#include "internal.h"

#define _COMPONENT		ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("event");

#ifdef CONFIG_ACPI_PROC_EVENT
/* Global vars for handling event proc entry */
static DEFINE_SPINLOCK(acpi_system_event_lock);
/* Single-open policy: only one reader of /proc/acpi/event at a time. */
int event_is_open = 0;
extern struct list_head acpi_bus_event_list;
extern wait_queue_head_t acpi_bus_event_queue;

/*
 * open(): enforce exclusive access to the event file.  Returns -EBUSY
 * if another process already holds it open.
 */
static int acpi_system_open_event(struct inode *inode, struct file *file)
{
	spin_lock_irq(&acpi_system_event_lock);

	if (event_is_open)
		goto out_busy;

	event_is_open = 1;

	spin_unlock_irq(&acpi_system_event_lock);
	return 0;

out_busy:
	spin_unlock_irq(&acpi_system_event_lock);
	return -EBUSY;
}

/*
 * read(): deliver one formatted event line per event, possibly split
 * across multiple reads.  Formatting state (str/chars_remaining/ptr) is
 * kept in function-local statics, which is safe only because open()
 * enforces a single reader.
 *
 * NOTE(review): chars_remaining is a signed int compared against the
 * size_t count; chars_remaining is always >= 0 here so the unsigned
 * promotion is harmless, but worth keeping in mind when modifying.
 */
static ssize_t
acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
		       loff_t * ppos)
{
	int result = 0;
	struct acpi_bus_event event;
	static char str[ACPI_MAX_STRING];
	static int chars_remaining = 0;
	static char *ptr;

	/* No partial line pending: block (or -EAGAIN) for the next event. */
	if (!chars_remaining) {
		memset(&event, 0, sizeof(struct acpi_bus_event));

		if ((file->f_flags & O_NONBLOCK)
		    && (list_empty(&acpi_bus_event_list)))
			return -EAGAIN;

		result = acpi_bus_receive_event(&event);
		if (result)
			return result;

		chars_remaining = sprintf(str, "%s %s %08x %08x\n",
					  event.device_class ? event.
					  device_class : "<unknown>",
					  event.bus_id ? event.
					  bus_id : "<unknown>", event.type,
					  event.data);
		ptr = str;
	}

	/* Hand out at most what is left of the current line. */
	if (chars_remaining < count) {
		count = chars_remaining;
	}

	if (copy_to_user(buffer, ptr, count))
		return -EFAULT;

	*ppos += count;
	chars_remaining -= count;
	ptr += count;

	return count;
}

/* release(): drop the exclusive-open flag so another reader can attach. */
static int acpi_system_close_event(struct inode *inode, struct file *file)
{
	spin_lock_irq(&acpi_system_event_lock);
	event_is_open = 0;
	spin_unlock_irq(&acpi_system_event_lock);
	return 0;
}

/* poll(): readable whenever the bus event list is non-empty. */
static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
{
	poll_wait(file, &acpi_bus_event_queue, wait);
	if (!list_empty(&acpi_bus_event_list))
		return POLLIN | POLLRDNORM;
	return 0;
}

static const struct file_operations acpi_system_event_ops = {
	.owner = THIS_MODULE,
	.open = acpi_system_open_event,
	.read = acpi_system_read_event,
	.release = acpi_system_close_event,
	.poll = acpi_system_poll_event,
	.llseek = default_llseek,
};
#endif	/* CONFIG_ACPI_PROC_EVENT */

/* ACPI notifier chain */
static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);

/*
 * Deliver an ACPI event to all registered in-kernel notifier callbacks.
 * Returns -EINVAL if any callback vetoes with NOTIFY_BAD, else 0.
 */
int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
	struct acpi_bus_event event;

	strcpy(event.device_class, dev->pnp.device_class);
	strcpy(event.bus_id, dev->pnp.bus_id);
	event.type = type;
	event.data = data;
	return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
                == NOTIFY_BAD) ? -EINVAL : 0;
}
EXPORT_SYMBOL(acpi_notifier_call_chain);

int register_acpi_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&acpi_chain_head, nb);
}
EXPORT_SYMBOL(register_acpi_notifier);

int unregister_acpi_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&acpi_chain_head, nb);
}
EXPORT_SYMBOL(unregister_acpi_notifier);

#ifdef CONFIG_NET
/* Monotonic sequence number for outgoing genetlink event messages. */
static unsigned int acpi_event_seqnum;
/* Wire format of one event as carried in the genetlink attribute. */
struct acpi_genl_event {
	acpi_device_class device_class;
	char bus_id[15];
	u32 type;
	u32 data;
};

/* attributes of acpi_genl_family */
enum {
	ACPI_GENL_ATTR_UNSPEC,
	ACPI_GENL_ATTR_EVENT,	/* ACPI event info needed by user space */
	__ACPI_GENL_ATTR_MAX,
};
#define ACPI_GENL_ATTR_MAX (__ACPI_GENL_ATTR_MAX - 1)

/* commands supported by the acpi_genl_family */
enum {
	ACPI_GENL_CMD_UNSPEC,
	ACPI_GENL_CMD_EVENT,	/* kernel->user notifications for ACPI events */
	__ACPI_GENL_CMD_MAX,
};
#define ACPI_GENL_CMD_MAX (__ACPI_GENL_CMD_MAX - 1)

#define ACPI_GENL_FAMILY_NAME		"acpi_event"
#define ACPI_GENL_VERSION		0x01
#define ACPI_GENL_MCAST_GROUP_NAME 	"acpi_mc_group"

static struct genl_family acpi_event_genl_family = {
	.id = GENL_ID_GENERATE,
	.name = ACPI_GENL_FAMILY_NAME,
	.version = ACPI_GENL_VERSION,
	.maxattr = ACPI_GENL_ATTR_MAX,
};

static struct genl_multicast_group acpi_event_mcgrp = {
	.name = ACPI_GENL_MCAST_GROUP_NAME,
};

/*
 * Broadcast an ACPI event to user space as a genetlink multicast
 * message on the acpi_mc_group.  Called from atomic context, hence
 * GFP_ATOMIC allocations.
 *
 * NOTE(review): device_class and bus_id are copied with strcpy into
 * fixed-size fields; callers are presumed to pass bounded strings —
 * verify against the acpi_device pnp fields.
 */
int acpi_bus_generate_netlink_event(const char *device_class,
				      const char *bus_id,
				      u8 type, int data)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	struct acpi_genl_event *event;
	void *msg_header;
	int size;
	int result;

	/* allocate memory */
	size = nla_total_size(sizeof(struct acpi_genl_event)) +
	    nla_total_size(0);

	skb = genlmsg_new(size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* add the genetlink message header */
	msg_header = genlmsg_put(skb, 0, acpi_event_seqnum++,
				 &acpi_event_genl_family, 0,
				 ACPI_GENL_CMD_EVENT);
	if (!msg_header) {
		nlmsg_free(skb);
		return -ENOMEM;
	}

	/* fill the data */
	attr =
	    nla_reserve(skb, ACPI_GENL_ATTR_EVENT,
			sizeof(struct acpi_genl_event));
	if (!attr) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	event = nla_data(attr);
	if (!event) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	memset(event, 0, sizeof(struct acpi_genl_event));

	strcpy(event->device_class, device_class);
	strcpy(event->bus_id, bus_id);
	event->type = type;
	event->data = data;

	/* send multicast genetlink message */
	result = genlmsg_end(skb, msg_header);
	if (result < 0) {
		nlmsg_free(skb);
		return result;
	}

	genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
	return 0;
}
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);

/*
 * Register the acpi_event genetlink family and its multicast group;
 * unregisters the family again if group registration fails.
 */
static int acpi_event_genetlink_init(void)
{
	int result;

	result = genl_register_family(&acpi_event_genl_family);
	if (result)
		return result;

	result = genl_register_mc_group(&acpi_event_genl_family,
					&acpi_event_mcgrp);
	if (result)
		genl_unregister_family(&acpi_event_genl_family);

	return result;
}

#else
/* !CONFIG_NET stubs: netlink delivery silently unavailable. */
int acpi_bus_generate_netlink_event(const char *device_class,
				      const char *bus_id,
				      u8 type, int data)
{
	return 0;
}
EXPORT_SYMBOL(acpi_bus_generate_netlink_event);

static int acpi_event_genetlink_init(void)
{
	return -ENODEV;
}
#endif

/*
 * Initcall: set up genetlink delivery (warn-only on failure) and, when
 * configured, the legacy /proc/acpi/event interface.
 */
static int __init acpi_event_init(void)
{
#ifdef CONFIG_ACPI_PROC_EVENT
	struct proc_dir_entry *entry;
#endif
	int error = 0;

	if (acpi_disabled)
		return 0;

	/* create genetlink for acpi event */
	error = acpi_event_genetlink_init();
	if (error)
		printk(KERN_WARNING PREFIX
		       "Failed to create genetlink family for ACPI event\n");

#ifdef CONFIG_ACPI_PROC_EVENT
	/* 'event' [R] */
	entry = proc_create("event", S_IRUSR, acpi_root_dir,
			    &acpi_system_event_ops);
	if (!entry)
		return -ENODEV;
#endif
	return 0;
}

fs_initcall(acpi_event_init);
gpl-2.0
mericon/Xp_Kernel_LGH850
drivers/power/ab8500_bmdata.c
4377
16933
#include <linux/export.h> #include <linux/power_supply.h> #include <linux/of.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/mfd/abx500/ab8500-bm.h> /* * These are the defined batteries that uses a NTC and ID resistor placed * inside of the battery pack. * Note that the res_to_temp table must be strictly sorted by falling resistance * values to work. */ const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[] = { {-5, 53407}, { 0, 48594}, { 5, 43804}, {10, 39188}, {15, 34870}, {20, 30933}, {25, 27422}, {30, 24347}, {35, 21694}, {40, 19431}, {45, 17517}, {50, 15908}, {55, 14561}, {60, 13437}, {65, 12500}, }; EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor); const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor); EXPORT_SYMBOL(ab8500_temp_tbl_a_size); const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[] = { {-5, 200000}, { 0, 159024}, { 5, 151921}, {10, 144300}, {15, 136424}, {20, 128565}, {25, 120978}, {30, 113875}, {35, 107397}, {40, 101629}, {45, 96592}, {50, 92253}, {55, 88569}, {60, 85461}, {65, 82869}, }; EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor); const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor); EXPORT_SYMBOL(ab8500_temp_tbl_b_size); static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = { {4171, 100}, {4114, 95}, {4009, 83}, {3947, 74}, {3907, 67}, {3863, 59}, {3830, 56}, {3813, 53}, {3791, 46}, {3771, 33}, {3754, 25}, {3735, 20}, {3717, 17}, {3681, 13}, {3664, 8}, {3651, 6}, {3635, 5}, {3560, 3}, {3408, 1}, {3247, 0}, }; static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = { {4161, 100}, {4124, 98}, {4044, 90}, {4003, 85}, {3966, 80}, {3933, 75}, {3888, 67}, {3849, 60}, {3813, 55}, {3787, 47}, {3772, 30}, {3751, 25}, {3718, 20}, {3681, 16}, {3660, 14}, {3589, 10}, {3546, 7}, {3495, 4}, {3404, 2}, {3250, 0}, }; static const struct abx500_v_to_cap cap_tbl[] = { {4186, 100}, {4163, 99}, {4114, 95}, {4068, 90}, {3990, 80}, {3926, 70}, {3898, 65}, 
{3866, 60}, {3833, 55}, {3812, 50}, {3787, 40}, {3768, 30}, {3747, 25}, {3730, 20}, {3705, 15}, {3699, 14}, {3684, 12}, {3672, 9}, {3657, 7}, {3638, 6}, {3556, 4}, {3424, 2}, {3317, 1}, {3094, 0}, }; /* * Note that the res_to_temp table must be strictly sorted by falling * resistance values to work. */ static const struct abx500_res_to_temp temp_tbl[] = { {-5, 214834}, { 0, 162943}, { 5, 124820}, {10, 96520}, {15, 75306}, {20, 59254}, {25, 47000}, {30, 37566}, {35, 30245}, {40, 24520}, {45, 20010}, {50, 16432}, {55, 13576}, {60, 11280}, {65, 9425}, }; /* * Note that the batres_vs_temp table must be strictly sorted by falling * temperature values to work. */ static const struct batres_vs_temp temp_to_batres_tbl_thermistor[] = { { 40, 120}, { 30, 135}, { 20, 165}, { 10, 230}, { 00, 325}, {-10, 445}, {-20, 595}, }; /* * Note that the batres_vs_temp table must be strictly sorted by falling * temperature values to work. */ static const struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = { { 60, 300}, { 30, 300}, { 20, 300}, { 10, 300}, { 00, 300}, {-10, 300}, {-20, 300}, }; /* battery resistance table for LI ION 9100 battery */ static const struct batres_vs_temp temp_to_batres_tbl_9100[] = { { 60, 180}, { 30, 180}, { 20, 180}, { 10, 180}, { 00, 180}, {-10, 180}, {-20, 180}, }; static struct abx500_battery_type bat_type_thermistor[] = { [BATTERY_UNKNOWN] = { /* First element always represent the UNKNOWN battery */ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN, .resis_high = 0, .resis_low = 0, .battery_resistance = 300, .charge_full_design = 612, .nominal_voltage = 3700, .termination_vol = 4050, .termination_curr = 200, .recharge_cap = 95, .normal_cur_lvl = 400, .normal_vol_lvl = 4100, .maint_a_cur_lvl = 400, .maint_a_vol_lvl = 4050, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 400, .maint_b_vol_lvl = 4000, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), .r_to_t_tbl = temp_tbl, 
.n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), .v_to_cap_tbl = cap_tbl, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, { .name = POWER_SUPPLY_TECHNOLOGY_LIPO, .resis_high = 53407, .resis_low = 12500, .battery_resistance = 300, .charge_full_design = 900, .nominal_voltage = 3600, .termination_vol = 4150, .termination_curr = 80, .recharge_cap = 95, .normal_cur_lvl = 700, .normal_vol_lvl = 4200, .maint_a_cur_lvl = 600, .maint_a_vol_lvl = 4150, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 600, .maint_b_vol_lvl = 4100, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor), .r_to_t_tbl = ab8500_temp_tbl_a_thermistor, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_a_thermistor), .v_to_cap_tbl = cap_tbl_a_thermistor, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, { .name = POWER_SUPPLY_TECHNOLOGY_LIPO, .resis_high = 200000, .resis_low = 82869, .battery_resistance = 300, .charge_full_design = 900, .nominal_voltage = 3600, .termination_vol = 4150, .termination_curr = 80, .recharge_cap = 95, .normal_cur_lvl = 700, .normal_vol_lvl = 4200, .maint_a_cur_lvl = 600, .maint_a_vol_lvl = 4150, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 600, .maint_b_vol_lvl = 4100, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor), .r_to_t_tbl = ab8500_temp_tbl_b_thermistor, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_b_thermistor), .v_to_cap_tbl = cap_tbl_b_thermistor, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, }; static struct abx500_battery_type bat_type_ext_thermistor[] = { [BATTERY_UNKNOWN] = { /* First element always represent the UNKNOWN battery */ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN, .resis_high = 0, 
.resis_low = 0, .battery_resistance = 300, .charge_full_design = 612, .nominal_voltage = 3700, .termination_vol = 4050, .termination_curr = 200, .recharge_cap = 95, .normal_cur_lvl = 400, .normal_vol_lvl = 4100, .maint_a_cur_lvl = 400, .maint_a_vol_lvl = 4050, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 400, .maint_b_vol_lvl = 4000, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), .r_to_t_tbl = temp_tbl, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), .v_to_cap_tbl = cap_tbl, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, /* * These are the batteries that doesn't have an internal NTC resistor to measure * its temperature. The temperature in this case is measure with a NTC placed * near the battery but on the PCB. */ { .name = POWER_SUPPLY_TECHNOLOGY_LIPO, .resis_high = 76000, .resis_low = 53000, .battery_resistance = 300, .charge_full_design = 900, .nominal_voltage = 3700, .termination_vol = 4150, .termination_curr = 100, .recharge_cap = 95, .normal_cur_lvl = 700, .normal_vol_lvl = 4200, .maint_a_cur_lvl = 600, .maint_a_vol_lvl = 4150, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 600, .maint_b_vol_lvl = 4100, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), .r_to_t_tbl = temp_tbl, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), .v_to_cap_tbl = cap_tbl, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, { .name = POWER_SUPPLY_TECHNOLOGY_LION, .resis_high = 30000, .resis_low = 10000, .battery_resistance = 300, .charge_full_design = 950, .nominal_voltage = 3700, .termination_vol = 4150, .termination_curr = 100, .recharge_cap = 95, .normal_cur_lvl = 700, .normal_vol_lvl = 4200, .maint_a_cur_lvl = 600, .maint_a_vol_lvl = 4150, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 600, .maint_b_vol_lvl 
= 4100, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), .r_to_t_tbl = temp_tbl, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), .v_to_cap_tbl = cap_tbl, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, { .name = POWER_SUPPLY_TECHNOLOGY_LION, .resis_high = 95000, .resis_low = 76001, .battery_resistance = 300, .charge_full_design = 950, .nominal_voltage = 3700, .termination_vol = 4150, .termination_curr = 100, .recharge_cap = 95, .normal_cur_lvl = 700, .normal_vol_lvl = 4200, .maint_a_cur_lvl = 600, .maint_a_vol_lvl = 4150, .maint_a_chg_timer_h = 60, .maint_b_cur_lvl = 600, .maint_b_vol_lvl = 4100, .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl), .r_to_t_tbl = temp_tbl, .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl), .v_to_cap_tbl = cap_tbl, .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor), .batres_tbl = temp_to_batres_tbl_thermistor, }, }; static const struct abx500_bm_capacity_levels cap_levels = { .critical = 2, .low = 10, .normal = 70, .high = 95, .full = 100, }; static const struct abx500_fg_parameters fg = { .recovery_sleep_timer = 10, .recovery_total_time = 100, .init_timer = 1, .init_discard_time = 5, .init_total_time = 40, .high_curr_time = 60, .accu_charging = 30, .accu_high_curr = 30, .high_curr_threshold = 50, .lowbat_threshold = 3100, .battok_falling_th_sel0 = 2860, .battok_raising_th_sel1 = 2860, .maint_thres = 95, .user_cap_limit = 15, .pcut_enable = 1, .pcut_max_time = 127, .pcut_flag_time = 112, .pcut_max_restart = 15, .pcut_debounce_time = 2, }; static const struct abx500_maxim_parameters ab8500_maxi_params = { .ena_maxi = true, .chg_curr = 910, .wait_cycles = 10, .charger_curr_step = 100, }; static const struct abx500_maxim_parameters abx540_maxi_params = { .ena_maxi = true, .chg_curr = 3000, .wait_cycles = 10, 
.charger_curr_step = 200, }; static const struct abx500_bm_charger_parameters chg = { .usb_volt_max = 5500, .usb_curr_max = 1500, .ac_volt_max = 7500, .ac_curr_max = 1500, }; /* * This array maps the raw hex value to charger output current used by the * AB8500 values */ static int ab8500_charge_output_curr_map[] = { 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1500, }; static int ab8540_charge_output_curr_map[] = { 0, 0, 0, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425, 450, 475, 500, 525, 550, 575, 600, 625, 650, 675, 700, 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1500, 1600, 1700, 1900, 2000, }; /* * This array maps the raw hex value to charger input current used by the * AB8500 values */ static int ab8500_charge_input_curr_map[] = { 50, 98, 193, 290, 380, 450, 500, 600, 700, 800, 900, 1000, 1100, 1300, 1400, 1500, }; static int ab8540_charge_input_curr_map[] = { 25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425, 450, 475, 500, 525, 550, 575, 600, 625, 650, 675, 700, 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, 1500, 1500, 1500, 1500, }; struct abx500_bm_data ab8500_bm_data = { .temp_under = 3, .temp_low = 8, .temp_high = 43, .temp_over = 48, .main_safety_tmr_h = 4, .temp_interval_chg = 20, .temp_interval_nochg = 120, .usb_safety_tmr_h = 4, .bkup_bat_v = BUP_VCH_SEL_2P6V, .bkup_bat_i = BUP_ICH_SEL_150UA, .no_maintenance = false, .capacity_scaling = false, .adc_therm = ABx500_ADC_THERM_BATCTRL, .chg_unknown_bat = false, .enable_overshoot = false, .fg_res = 100, .cap_levels = &cap_levels, .bat_type = bat_type_thermistor, .n_btypes = ARRAY_SIZE(bat_type_thermistor), .batt_id = 0, .interval_charging = 5, 
.interval_not_charging = 120, .temp_hysteresis = 3, .gnd_lift_resistance = 34, .chg_output_curr = ab8500_charge_output_curr_map, .n_chg_out_curr = ARRAY_SIZE(ab8500_charge_output_curr_map), .maxi = &ab8500_maxi_params, .chg_params = &chg, .fg_params = &fg, .chg_input_curr = ab8500_charge_input_curr_map, .n_chg_in_curr = ARRAY_SIZE(ab8500_charge_input_curr_map), }; struct abx500_bm_data ab8540_bm_data = { .temp_under = 3, .temp_low = 8, .temp_high = 43, .temp_over = 48, .main_safety_tmr_h = 4, .temp_interval_chg = 20, .temp_interval_nochg = 120, .usb_safety_tmr_h = 4, .bkup_bat_v = BUP_VCH_SEL_2P6V, .bkup_bat_i = BUP_ICH_SEL_150UA, .no_maintenance = false, .capacity_scaling = false, .adc_therm = ABx500_ADC_THERM_BATCTRL, .chg_unknown_bat = false, .enable_overshoot = false, .fg_res = 100, .cap_levels = &cap_levels, .bat_type = bat_type_thermistor, .n_btypes = ARRAY_SIZE(bat_type_thermistor), .batt_id = 0, .interval_charging = 5, .interval_not_charging = 120, .temp_hysteresis = 3, .gnd_lift_resistance = 0, .maxi = &abx540_maxi_params, .chg_params = &chg, .fg_params = &fg, .chg_output_curr = ab8540_charge_output_curr_map, .n_chg_out_curr = ARRAY_SIZE(ab8540_charge_output_curr_map), .chg_input_curr = ab8540_charge_input_curr_map, .n_chg_in_curr = ARRAY_SIZE(ab8540_charge_input_curr_map), }; int ab8500_bm_of_probe(struct device *dev, struct device_node *np, struct abx500_bm_data *bm) { const struct batres_vs_temp *tmp_batres_tbl; struct device_node *battery_node; const char *btech; int i; /* get phandle to 'battery-info' node */ battery_node = of_parse_phandle(np, "battery", 0); if (!battery_node) { dev_err(dev, "battery node or reference missing\n"); return -EINVAL; } btech = of_get_property(battery_node, "stericsson,battery-type", NULL); if (!btech) { dev_warn(dev, "missing property battery-name/type\n"); return -EINVAL; } if (strncmp(btech, "LION", 4) == 0) { bm->no_maintenance = true; bm->chg_unknown_bat = true; bm->bat_type[BATTERY_UNKNOWN].charge_full_design = 
2600; bm->bat_type[BATTERY_UNKNOWN].termination_vol = 4150; bm->bat_type[BATTERY_UNKNOWN].recharge_cap = 95; bm->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520; bm->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200; } if (of_property_read_bool(battery_node, "thermistor-on-batctrl")) { if (strncmp(btech, "LION", 4) == 0) tmp_batres_tbl = temp_to_batres_tbl_9100; else tmp_batres_tbl = temp_to_batres_tbl_thermistor; } else { bm->n_btypes = 4; bm->bat_type = bat_type_ext_thermistor; bm->adc_therm = ABx500_ADC_THERM_BATTEMP; tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor; } /* select the battery resolution table */ for (i = 0; i < bm->n_btypes; ++i) bm->bat_type[i].batres_tbl = tmp_batres_tbl; of_node_put(battery_node); return 0; }
gpl-2.0
hashok/linux-3.3.8-bluetooth-port
drivers/media/video/davinci/vpif_capture.c
4889
63335
/* * Copyright (C) 2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO : add support for VBI & HBI data service * add static buffer allocation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/string.h> #include <linux/videodev2.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include "vpif_capture.h" #include "vpif.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPIF_CAPTURE_VERSION); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) 
\ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; static u32 ch0_numbuffers = 3; static u32 ch1_numbuffers = 3; static u32 ch0_bufsize = 1920 * 1080 * 2; static u32 ch1_bufsize = 720 * 576 * 2; module_param(debug, int, 0644); module_param(ch0_numbuffers, uint, S_IRUGO); module_param(ch1_numbuffers, uint, S_IRUGO); module_param(ch0_bufsize, uint, S_IRUGO); module_param(ch1_bufsize, uint, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level 0-1"); MODULE_PARM_DESC(ch2_numbuffers, "Channel0 buffer count (default:3)"); MODULE_PARM_DESC(ch3_numbuffers, "Channel1 buffer count (default:3)"); MODULE_PARM_DESC(ch2_bufsize, "Channel0 buffer size (default:1920 x 1080 x 2)"); MODULE_PARM_DESC(ch3_bufsize, "Channel1 buffer size (default:720 x 576 x 2)"); static struct vpif_config_params config_params = { .min_numbuffers = 3, .numbuffers[0] = 3, .numbuffers[1] = 3, .min_bufsize[0] = 720 * 480 * 2, .min_bufsize[1] = 720 * 480 * 2, .channel_bufsize[0] = 1920 * 1080 * 2, .channel_bufsize[1] = 720 * 576 * 2, }; /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; /** * vpif_uservirt_to_phys : translate user/virtual address to phy address * @virtp: user/virtual address * * This inline function is used to convert user space virtual address to * physical address. 
*/
/*
 * Translation strategy, in order:
 *  1. kernel direct-mapped addresses (>= PAGE_OFFSET): virt_to_phys()
 *  2. VM_IO vmas with a non-zero pgoff (kernel memory mmap()ed to
 *     userspace): physical address derived from the vma's pgoff
 *  3. anything else: resolve by pinning the page via get_user_pages()
 *
 * Returns the physical address, or 0 when get_user_pages() fails.
 *
 * NOTE(review): the page pinned by get_user_pages() in case 3 is never
 * released with put_page() in this function, which looks like a page
 * reference leak per translation -- confirm against the callers.
 * NOTE(review): 0 is returned on failure, but 0 is also 8-byte aligned,
 * so callers that only test alignment of the result will not detect it.
 */
static inline u32 vpif_uservirt_to_phys(u32 virtp)
{
	unsigned long physp = 0;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	vma = find_vma(mm, virtp);

	/* For kernel direct-mapped memory, take the easy way */
	if (virtp >= PAGE_OFFSET)
		physp = virt_to_phys((void *)virtp);
	else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff))
		/**
		 * this will catch, kernel-allocated, mmaped-to-usermode
		 * addresses
		 */
		physp = (vma->vm_pgoff << PAGE_SHIFT) +
			(virtp - vma->vm_start);
	else {
		/* otherwise, use get_user_pages() for general userland
		 * pages */
		int res, nr_pages = 1;
		struct page *pages;

		down_read(&current->mm->mmap_sem);

		res = get_user_pages(current, current->mm,
				     virtp, nr_pages, 1, 0, &pages, NULL);
		up_read(&current->mm->mmap_sem);

		if (res == nr_pages)
			physp = __pa(page_address(&pages[0]) +
				     (virtp & ~PAGE_MASK));
		else {
			vpif_err("get_user_pages failed\n");
			return 0;
		}
	}
	return physp;
}

/**
 * buffer_prepare : callback function for buffer prepare
 * @q : buffer queue ptr
 * @vb: ptr to video buffer
 * @field: field info
 *
 * This is the callback function for buffer prepare when videobuf_qbuf()
 * function is called.
The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpif_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; unsigned long addr; vpif_dbg(2, debug, "vpif_buffer_prepare\n"); common = &ch->common[VPIF_VIDEO_INDEX]; /* If buffer is not initialized, initialize it */ if (VIDEOBUF_NEEDS_INIT == vb->state) { vb->width = common->width; vb->height = common->height; vb->size = vb->width * vb->height; vb->field = field; } vb->state = VIDEOBUF_PREPARED; /** * if user pointer memory mechanism is used, get the physical * address of the buffer */ if (V4L2_MEMORY_USERPTR == common->memory) { if (0 == vb->baddr) { vpif_dbg(1, debug, "buffer address is 0\n"); return -EINVAL; } vb->boff = vpif_uservirt_to_phys(vb->baddr); if (!IS_ALIGNED(vb->boff, 8)) goto exit; } addr = vb->boff; if (q->streaming) { if (!IS_ALIGNED((addr + common->ytop_off), 8) || !IS_ALIGNED((addr + common->ybtm_off), 8) || !IS_ALIGNED((addr + common->ctop_off), 8) || !IS_ALIGNED((addr + common->cbtm_off), 8)) goto exit; } return 0; exit: vpif_dbg(1, debug, "buffer_prepare:offset is not aligned to 8 bytes\n"); return -EINVAL; } /** * vpif_buffer_setup : Callback function for buffer setup. 
* @q: buffer queue ptr * @count: number of buffers * @size: size of the buffer * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpif_buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_setup\n"); /* If memory type is not mmap, return */ if (V4L2_MEMORY_MMAP != common->memory) return 0; /* Calculate the size of the buffer */ *size = config_params.channel_bufsize[ch->channel_id]; if (*count < config_params.min_numbuffers) *count = config_params.min_numbuffers; return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @q: ptr to videobuf_queue * @vb: ptr to videobuf_buffer */ static void vpif_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); /* add the buffer to the DMA queue */ list_add_tail(&vb->queue, &common->dma_queue); /* Change state of the buffer */ vb->state = VIDEOBUF_QUEUED; } /** * vpif_buffer_release : Callback function to free buffer * @q: buffer queue ptr * @vb: ptr to video buffer * * This function is called from the videobuf layer to free memory * allocated to the buffers */ static void vpif_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = q->priv_data; struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; videobuf_dma_contig_free(q, vb); vb->state = VIDEOBUF_NEEDS_INIT; } static struct videobuf_queue_ops 
video_qops = { .buf_setup = vpif_buffer_setup, .buf_prepare = vpif_buffer_prepare, .buf_queue = vpif_buffer_queue, .buf_release = vpif_buffer_release, }; static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} }; /** * vpif_process_buffer_complete: process a completed buffer * @common: ptr to common channel object * * This function time stamp the buffer and mark it as DONE. It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static void vpif_process_buffer_complete(struct common_obj *common) { do_gettimeofday(&common->cur_frm->ts); common->cur_frm->state = VIDEOBUF_DONE; wake_up_interruptible(&common->cur_frm->done); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; } /** * vpif_schedule_next_buffer: set next buffer address for capture * @common : ptr to common channel object * * This function will get next buffer from the dma queue and * set the buffer address in the vpif register for capture. * the buffer is marked active */ static void vpif_schedule_next_buffer(struct common_obj *common) { unsigned long addr = 0; common->next_frm = list_entry(common->dma_queue.next, struct videobuf_buffer, queue); /* Remove that buffer from the buffer queue */ list_del(&common->next_frm->queue); common->next_frm->state = VIDEOBUF_ACTIVE; if (V4L2_MEMORY_USERPTR == common->memory) addr = common->next_frm->boff; else addr = videobuf_to_dma_contig(common->next_frm); /* Set top and bottom field addresses in VPIF registers */ common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); } /** * vpif_channel_isr : ISR handler for vpif capture * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPIF registers */ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) { struct vpif_device *dev = &vpif_obj; struct common_obj *common; struct channel_obj *ch; enum 
v4l2_field field; int channel_id = 0; int fid = -1, i; channel_id = *(int *)(dev_id); ch = dev->dev[channel_id]; field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field; for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ if (0 == common->started) continue; /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt) { /* Progressive mode */ if (list_empty(&common->dma_queue)) continue; if (!channel_first_int[i][channel_id]) vpif_process_buffer_complete(common); channel_first_int[i][channel_id] = 0; vpif_schedule_next_buffer(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ vpif_process_buffer_complete(common); } else if (1 == fid) { /* odd field */ if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) continue; vpif_schedule_next_buffer(common); } } } return IRQ_HANDLED; } /** * vpif_update_std_info() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int vpif_update_std_info(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct vpif_channel_config_params *config; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj 
*vid_ch = &ch->video; int index; vpif_dbg(2, debug, "vpif_update_std_info\n"); for (index = 0; index < vpif_ch_params_count; index++) { config = &ch_params[index]; if (config->hd_sd == 0) { vpif_dbg(2, debug, "SD format\n"); if (config->stdid & vid_ch->stdid) { memcpy(std_info, config, sizeof(*config)); break; } } else { vpif_dbg(2, debug, "HD format\n"); if (config->dv_preset == vid_ch->dv_preset) { memcpy(std_info, config, sizeof(*config)); break; } } } /* standard not found */ if (index == vpif_ch_params_count) return -EINVAL; common->fmt.fmt.pix.width = std_info->width; common->width = std_info->width; common->fmt.fmt.pix.height = std_info->height; common->height = std_info->height; common->fmt.fmt.pix.bytesperline = std_info->width; vpifparams->video_params.hpitch = std_info->width; vpifparams->video_params.storage_mode = std_info->frm_fmt; return 0; } /** * vpif_calculate_offsets : This function calculates buffers offsets * @ch : ptr to channel object * * This function calculates buffer offsets for Y and C in the top and * bottom field */ static void vpif_calculate_offsets(struct channel_obj *ch) { unsigned int hpitch, vpitch, sizeimage; struct video_obj *vid_ch = &(ch->video); struct vpif_params *vpifparams = &ch->vpifparams; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; enum v4l2_field field = common->fmt.fmt.pix.field; vpif_dbg(2, debug, "vpif_calculate_offsets\n"); if (V4L2_FIELD_ANY == field) { if (vpifparams->std_info.frm_fmt) vid_ch->buf_field = V4L2_FIELD_NONE; else vid_ch->buf_field = V4L2_FIELD_INTERLACED; } else vid_ch->buf_field = common->fmt.fmt.pix.field; if (V4L2_MEMORY_USERPTR == common->memory) sizeimage = common->fmt.fmt.pix.sizeimage; else sizeimage = config_params.channel_bufsize[ch->channel_id]; hpitch = common->fmt.fmt.pix.bytesperline; vpitch = sizeimage / (hpitch * 2); if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom 
*/ common->ytop_off = 0; common->ybtm_off = hpitch; common->ctop_off = sizeimage / 2; common->cbtm_off = sizeimage / 2 + hpitch; } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ytop_off = 0; common->ybtm_off = sizeimage / 4; common->ctop_off = sizeimage / 2; common->cbtm_off = common->ctop_off + sizeimage / 4; } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ common->ybtm_off = 0; common->ytop_off = sizeimage / 4; common->cbtm_off = sizeimage / 2; common->ctop_off = common->cbtm_off + sizeimage / 4; } if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) vpifparams->video_params.storage_mode = 1; else vpifparams->video_params.storage_mode = 0; if (1 == vpifparams->std_info.frm_fmt) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; else { if ((field == V4L2_FIELD_ANY) || (field == V4L2_FIELD_INTERLACED)) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline * 2; else vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; } ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid; } /** * vpif_config_format: configure default frame format in the device * ch : ptr to channel object */ static void vpif_config_format(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_config_format\n"); common->fmt.fmt.pix.field = V4L2_FIELD_ANY; if (config_params.numbuffers[ch->channel_id] == 0) common->memory = V4L2_MEMORY_USERPTR; else common->memory = V4L2_MEMORY_MMAP; common->fmt.fmt.pix.sizeimage = config_params.channel_bufsize[ch->channel_id]; if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8; else common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P; common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; } /** * vpif_get_default_field() - 
Get default field type based on interface * @vpif_params - ptr to vpif params */ static inline enum v4l2_field vpif_get_default_field( struct vpif_interface *iface) { return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED; } /** * vpif_check_format() - check given pixel format for compatibility * @ch - channel ptr * @pixfmt - Given pixel format * @update - update the values as per hardware requirement * * Check the application pixel format for S_FMT and update the input * values as per hardware limits for TRY_FMT. The default pixel and * field format is selected based on interface type. */ static int vpif_check_format(struct channel_obj *ch, struct v4l2_pix_format *pixfmt, int update) { struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); struct vpif_params *vpif_params = &ch->vpifparams; enum v4l2_field field = pixfmt->field; u32 sizeimage, hpitch, vpitch; int ret = -EINVAL; vpif_dbg(2, debug, "vpif_check_format\n"); /** * first check for the pixel format. If if_type is Raw bayer, * only V4L2_PIX_FMT_SBGGR8 format is supported. Otherwise only * V4L2_PIX_FMT_YUV422P is supported */ if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) { if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) { if (!update) { vpif_dbg(2, debug, "invalid pix format\n"); goto exit; } pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } } else { if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) { if (!update) { vpif_dbg(2, debug, "invalid pixel format\n"); goto exit; } pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P; } } if (!(VPIF_VALID_FIELD(field))) { if (!update) { vpif_dbg(2, debug, "invalid field format\n"); goto exit; } /** * By default use FIELD_NONE for RAW Bayer capture * and FIELD_INTERLACED for other interfaces */ field = vpif_get_default_field(&vpif_params->iface); } else if (field == V4L2_FIELD_ANY) /* unsupported field. 
Use default */
		field = vpif_get_default_field(&vpif_params->iface);

	/* validate the hpitch */
	hpitch = pixfmt->bytesperline;
	if (hpitch < vpif_params->std_info.width) {
		if (!update) {
			vpif_dbg(2, debug, "invalid hpitch\n");
			goto exit;
		}
		/* clamp up to the standard's width when adjusting */
		hpitch = vpif_params->std_info.width;
	}

	/* USERPTR buffers carry their own size; MMAP uses the configured one */
	if (V4L2_MEMORY_USERPTR == common->memory)
		sizeimage = pixfmt->sizeimage;
	else
		sizeimage = config_params.channel_bufsize[ch->channel_id];

	/* derive lines-per-field: two fields share the buffer */
	vpitch = sizeimage / (hpitch * 2);

	/* validate the vpitch */
	if (vpitch < vpif_params->std_info.height) {
		if (!update) {
			vpif_dbg(2, debug, "Invalid vpitch\n");
			goto exit;
		}
		vpitch = vpif_params->std_info.height;
	}

	/*
	 * Check for 8 byte alignment.
	 * NOTE(review): ALIGN(hpitch, 8) is non-zero for any positive hpitch,
	 * so this branch can never be taken; the intended test was probably
	 * (hpitch & 7) != 0 -- confirm against the VPIF pitch requirement.
	 */
	if (!ALIGN(hpitch, 8)) {
		if (!update) {
			vpif_dbg(2, debug, "invalid pitch alignment\n");
			goto exit;
		}
		/* adjust to next 8 byte boundary */
		hpitch = (((hpitch + 7) / 8) * 8);
	}

	/* if update is set, modify the bytesperline and sizeimage */
	if (update) {
		pixfmt->bytesperline = hpitch;
		pixfmt->sizeimage = hpitch * vpitch * 2;
	}

	/*
	 * Image width and height is always based on current standard width and
	 * height
	 */
	pixfmt->width = common->fmt.fmt.pix.width;
	pixfmt->height = common->fmt.fmt.pix.height;
	return 0;
exit:
	return ret;
}

/**
 * vpif_config_addr() - function to configure buffer address in vpif
 * @ch - channel ptr
 * @muxmode - channel mux mode
 *
 * Selects which register-programming helper subsequent qbuf/streamon
 * calls use for this channel.
 */
static void vpif_config_addr(struct channel_obj *ch, int muxmode)
{
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_config_addr\n");

	common = &(ch->common[VPIF_VIDEO_INDEX]);

	if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
		common->set_addr = ch1_set_videobuf_addr;
	else if (2 == muxmode)
		/* non-muxed Y/C mode drives both channel paths */
		common->set_addr = ch0_set_videobuf_addr_yc_nmux;
	else
		common->set_addr = ch0_set_videobuf_addr;
}

/**
 * vpif_mmap() - map kernel space buffers into user space
 * @filep: file pointer
 * @vma: ptr to vm_area_struct
 */
static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
{
	/* Get the channel object and file handle object */
	struct vpif_fh *fh = filep->private_data;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);

	vpif_dbg(2, debug, "vpif_mmap\n");

	/* delegate the actual mapping to the videobuf framework */
	return videobuf_mmap_mapper(&common->buffer_queue, vma);
}

/**
 * vpif_poll() - used for the select/poll system call
 * @filep: file pointer
 * @wait: poll table to wait
 */
static unsigned int vpif_poll(struct file *filep, poll_table * wait)
{
	struct vpif_fh *fh = filep->private_data;
	struct channel_obj *channel = fh->channel;
	struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]);

	vpif_dbg(2, debug, "vpif_poll\n");

	/* nothing can become ready before streaming has started */
	if (common->started)
		return videobuf_poll_stream(filep, &common->buffer_queue, wait);
	return 0;
}

/**
 * vpif_open() - vpif open handler
 * @filep: file ptr
 *
 * It creates object of file handle structure and stores it in private_data
 * member of filepointer
 */
static int vpif_open(struct file *filep)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(filep);
	struct common_obj *common;
	struct video_obj *vid_ch;
	struct channel_obj *ch;
	struct vpif_fh *fh;
	int i;

	vpif_dbg(2, debug, "vpif_open\n");

	ch = video_get_drvdata(vdev);

	vid_ch = &ch->video;
	common = &ch->common[VPIF_VIDEO_INDEX];

	if (NULL == ch->curr_subdev_info) {
		/*
		 * search through the sub device to see a registered
		 * sub device and make it as current sub device
		 */
		for (i = 0; i < config->subdev_count; i++) {
			if (vpif_obj.sd[i]) {
				/* the sub device is registered */
				ch->curr_subdev_info = &config->subdev_info[i];
				/* make first input as the current input */
				vid_ch->input_idx = 0;
				break;
			}
		}
		if (i == config->subdev_count) {
			vpif_err("No sub device registered\n");
			return -ENOENT;
		}
	}

	/* Allocate memory for the file handle object */
	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
	if (NULL == fh) {
		vpif_err("unable to allocate memory for file handle object\n");
		return -ENOMEM;
	}

	/* store pointer to fh in private_data member of filep */
	filep->private_data = fh;
	fh->channel = ch;
	fh->initialized = 0;
	/* If decoder is not initialized, initialize it */
	if (!ch->initialized) {
		/* this handle owns the channel initialization */
		fh->initialized = 1;
		ch->initialized = 1;
		memset(&(ch->vpifparams), 0, sizeof(struct vpif_params));
	}

	/* Increment channel usrs counter */
	ch->usrs++;
	/* Set io_allowed member to false */
	fh->io_allowed[VPIF_VIDEO_INDEX] = 0;
	/* Initialize priority of this instance to default priority */
	fh->prio = V4L2_PRIORITY_UNSET;
	v4l2_prio_open(&ch->prio, &fh->prio);
	return 0;
}

/**
 * vpif_release() - function to clean up file close
 * @filep: file pointer
 *
 * This function deletes buffer queue, frees the buffers and the vpif file
 * handle
 */
static int vpif_release(struct file *filep)
{
	struct vpif_fh *fh = filep->private_data;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_release\n");

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* if this instance is doing IO */
	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
		/* Reset io_usrs member of channel object */
		common->io_usrs = 0;
		/* Disable channel as per its device type and channel id */
		if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
			enable_channel0(0);
			channel0_intr_enable(0);
		}
		/* started == 2 means channel 1 was also driven (yc nmux) */
		if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
		    (2 == common->started)) {
			enable_channel1(0);
			channel1_intr_enable(0);
		}
		common->started = 0;
		/* Free buffers allocated */
		videobuf_queue_cancel(&common->buffer_queue);
		videobuf_mmap_free(&common->buffer_queue);
	}

	/* Decrement channel usrs counter */
	ch->usrs--;
	/* Close the priority */
	v4l2_prio_close(&ch->prio, fh->prio);
	/* the initializing handle releases the channel on close */
	if (fh->initialized)
		ch->initialized = 0;

	filep->private_data = NULL;
	kfree(fh);
	return 0;
}

/**
 * vpif_reqbufs() - request buffer handler
 * @file: file ptr
 * @priv: file handle
 * @reqbuf: request buffer structure ptr
 */
static int vpif_reqbufs(struct file *file, void *priv,
			struct v4l2_requestbuffers *reqbuf)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common;
	u8 index = 0;

	vpif_dbg(2, debug, "vpif_reqbufs\n");

	/*
	 * This file handle has not
* initialized the channel; it is not allowed to do settings.
	 */
	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
		if (!fh->initialized) {
			vpif_dbg(1, debug, "Channel Busy\n");
			return -EBUSY;
		}
	}

	/* only the video-capture buffer type is supported */
	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type)
		return -EINVAL;

	index = VPIF_VIDEO_INDEX;

	common = &ch->common[index];

	/* only one file handle may own the buffer queue at a time */
	if (0 != common->io_usrs)
		return -EBUSY;

	/* Initialize videobuf queue as per the buffer type */
	videobuf_queue_dma_contig_init(&common->buffer_queue,
				       &video_qops, NULL,
				       &common->irqlock,
				       reqbuf->type,
				       common->fmt.fmt.pix.field,
				       sizeof(struct videobuf_buffer), fh,
				       &common->lock);

	/* Set io allowed member of file handle to TRUE */
	fh->io_allowed[index] = 1;
	/* Increment io usrs member of channel object to 1 */
	common->io_usrs = 1;
	/* Store type of memory requested in channel object */
	common->memory = reqbuf->memory;
	INIT_LIST_HEAD(&common->dma_queue);

	/* Allocate buffers */
	return videobuf_reqbufs(&common->buffer_queue, reqbuf);
}

/**
 * vpif_querybuf() - query buffer handler
 * @file: file ptr
 * @priv: file handle
 * @buf: v4l2 buffer structure ptr
 */
static int vpif_querybuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];

	vpif_dbg(2, debug, "vpif_querybuf\n");

	if (common->fmt.type != buf->type)
		return -EINVAL;

	/* querybuf is only meaningful for MMAP buffers */
	if (common->memory != V4L2_MEMORY_MMAP) {
		vpif_dbg(1, debug, "Invalid memory\n");
		return -EINVAL;
	}

	return videobuf_querybuf(&common->buffer_queue, buf);
}

/**
 * vpif_qbuf() - queue buffer handler
 * @file: file ptr
 * @priv: file handle
 * @buf: v4l2 buffer structure ptr
 */
static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_buffer tbuf = *buf;
	struct videobuf_buffer *buf1;
	unsigned long addr = 0;
	unsigned long flags;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_qbuf\n");

	if (common->fmt.type != tbuf.type) {
		vpif_err("invalid buffer type\n");
		return -EINVAL;
	}

	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_err("fh io not allowed \n");
		return -EACCES;
	}

	/*
	 * Common case: hand the buffer to videobuf.  The fast path below is
	 * taken only when streaming with an empty DMA queue and matching
	 * cur/next frame pointers, i.e. the hardware can be reprogrammed
	 * directly.
	 */
	if (!(list_empty(&common->dma_queue)) ||
	    (common->cur_frm != common->next_frm) ||
	    !common->started ||
	    (common->started && (0 == ch->field_id)))
		return videobuf_qbuf(&common->buffer_queue, buf);

	/* bufferqueue is empty store buffer address in VPIF registers */
	mutex_lock(&common->buffer_queue.vb_lock);
	buf1 = common->buffer_queue.bufs[tbuf.index];

	if ((buf1->state == VIDEOBUF_QUEUED) ||
	    (buf1->state == VIDEOBUF_ACTIVE)) {
		vpif_err("invalid state\n");
		goto qbuf_exit;
	}

	switch (buf1->memory) {
	case V4L2_MEMORY_MMAP:
		if (buf1->baddr == 0)
			goto qbuf_exit;
		break;
	case V4L2_MEMORY_USERPTR:
		if (tbuf.length < buf1->bsize)
			goto qbuf_exit;

		if ((VIDEOBUF_NEEDS_INIT != buf1->state) &&
		    (buf1->baddr != tbuf.m.userptr)) {
			/* userspace supplied a new pointer; drop the old map */
			vpif_buffer_release(&common->buffer_queue, buf1);
			buf1->baddr = tbuf.m.userptr;
		}
		break;
	default:
		goto qbuf_exit;
	}

	/* program the hardware with interrupts off */
	local_irq_save(flags);
	ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
				  common->buffer_queue.field);
	if (ret < 0) {
		local_irq_restore(flags);
		goto qbuf_exit;
	}

	buf1->state = VIDEOBUF_ACTIVE;

	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = buf1->boff;
	else
		addr = videobuf_to_dma_contig(buf1);

	common->next_frm = buf1;
	/* write Y/C top and bottom field addresses to the VPIF registers */
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	local_irq_restore(flags);
	list_add_tail(&buf1->stream, &common->buffer_queue.stream);
	mutex_unlock(&common->buffer_queue.vb_lock);
	return 0;

qbuf_exit:
	mutex_unlock(&common->buffer_queue.vb_lock);
	return -EINVAL;
}

/**
 * vpif_dqbuf() - dequeue buffer handler
 * @file: file ptr
 * @priv: file handle
 * @buf: v4l2 buffer structure ptr
 */
static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj
*common = &ch->common[VPIF_VIDEO_INDEX];

	vpif_dbg(2, debug, "vpif_dqbuf\n");

	return videobuf_dqbuf(&common->buffer_queue, buf,
			      file->f_flags & O_NONBLOCK);
}

/**
 * vpif_streamon() - streamon handler
 * @file: file ptr
 * @priv: file handle
 * @buftype: v4l2 buffer type
 */
static int vpif_streamon(struct file *file, void *priv,
			 enum v4l2_buf_type buftype)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	/* the other capture channel, checked for mux-mode conflicts below */
	struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
	struct vpif_params *vpif;
	unsigned long addr = 0;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_streamon\n");

	vpif = &ch->vpifparams;

	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		vpif_dbg(1, debug, "buffer type not supported\n");
		return -EINVAL;
	}

	/* If file handle is not allowed IO, return error */
	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_dbg(1, debug, "io not allowed\n");
		return -EACCES;
	}

	/* If Streaming is already started, return error */
	if (common->started) {
		vpif_dbg(1, debug, "channel->started\n");
		return -EBUSY;
	}

	/* a non-muxed channel conflicts with the other channel's streaming */
	if ((ch->channel_id == VPIF_CHANNEL0_VIDEO &&
	     oth_ch->common[VPIF_VIDEO_INDEX].started &&
	     vpif->std_info.ycmux_mode == 0) ||
	    ((ch->channel_id == VPIF_CHANNEL1_VIDEO) &&
	     (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) {
		vpif_dbg(1, debug, "other channel is being used\n");
		return -EBUSY;
	}

	ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0);
	if (ret)
		return ret;

	/* Enable streamon on the sub device */
	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
			       s_stream, 1);
	if (ret && (ret != -ENOIOCTLCMD)) {
		vpif_dbg(1, debug, "stream on failed in subdev\n");
		return ret;
	}

	/* Call videobuf_streamon to start streaming in videobuf */
	ret = videobuf_streamon(&common->buffer_queue);
	if (ret) {
		vpif_dbg(1, debug, "videobuf_streamon\n");
		return ret;
	}

	/* If buffer queue is empty, return error */
	if (list_empty(&common->dma_queue)) {
		vpif_dbg(1, debug, "buffer queue is empty\n");
		ret = -EIO;
		goto exit;
	}

	/* Get the next frame from the buffer queue */
	common->cur_frm = list_entry(common->dma_queue.next,
				     struct videobuf_buffer, queue);
	common->next_frm = common->cur_frm;
	/* Remove buffer from the buffer queue */
	list_del(&common->cur_frm->queue);
	/* Mark state of the current frame to active */
	common->cur_frm->state = VIDEOBUF_ACTIVE;
	/* Initialize field_id and started member */
	ch->field_id = 0;
	common->started = 1;

	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->cur_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->cur_frm);

	/* Calculate the offset for Y and C data in the buffer */
	vpif_calculate_offsets(ch);

	/* reject field settings that conflict with the standard's format */
	if ((vpif->std_info.frm_fmt &&
	     ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
	      (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
	    (!vpif->std_info.frm_fmt &&
	     (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
		vpif_dbg(1, debug, "conflict in field format and std format\n");
		ret = -EINVAL;
		goto exit;
	}

	/* configure 1 or 2 channel mode */
	ret = config->setup_input_channel_mode(vpif->std_info.ycmux_mode);
	if (ret < 0) {
		vpif_dbg(1, debug, "can't set vpif channel mode\n");
		goto exit;
	}

	/* Call vpif_set_params function to set the parameters and addresses */
	ret = vpif_set_video_params(vpif, ch->channel_id);
	if (ret < 0) {
		vpif_dbg(1, debug, "can't set video params\n");
		goto exit;
	}

	/* started now records the mux mode returned by vpif_set_video_params;
	 * a value of 2 drives the second channel path below */
	common->started = ret;
	vpif_config_addr(ch, ret);

	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	/*
	 * Set interrupt for both the fields in VPIF Register enable channel in
	 * VPIF register
	 */
	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
		channel0_intr_assert();
		channel0_intr_enable(1);
		enable_channel0(1);
	}
	if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
	    (common->started == 2)) {
		channel1_intr_assert();
		channel1_intr_enable(1);
		enable_channel1(1);
	}
	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
	return ret;

exit:
	videobuf_streamoff(&common->buffer_queue);
	return ret;
}

/**
 * vpif_streamoff() - streamoff handler
 * @file: file ptr
 * @priv: file handle
 * @buftype: v4l2 buffer type
 */
static int vpif_streamoff(struct file *file, void *priv,
			  enum v4l2_buf_type buftype)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	int ret;

	vpif_dbg(2, debug, "vpif_streamoff\n");

	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		vpif_dbg(1, debug, "buffer type not supported\n");
		return -EINVAL;
	}

	/* If io is not allowed for this file handle, return error */
	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_dbg(1, debug, "io not allowed\n");
		return -EACCES;
	}

	/* If streaming is not started, return error */
	if (!common->started) {
		vpif_dbg(1, debug, "channel->started\n");
		return -EINVAL;
	}

	/* disable channel */
	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
		enable_channel0(0);
		channel0_intr_enable(0);
	} else {
		enable_channel1(0);
		channel1_intr_enable(0);
	}

	common->started = 0;

	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
			       s_stream, 0);
	if (ret && (ret != -ENOIOCTLCMD))
		vpif_dbg(1, debug, "stream off failed in subdev\n");

	return videobuf_streamoff(&common->buffer_queue);
}

/**
 * vpif_map_sub_device_to_input() - Maps sub device to input
 * @ch - ptr to channel
 * @vpif_cfg - ptr to capture configuration
 * @input_index - Given input index from application
 * @sub_device_index - index into sd table
 *
 * lookup the sub device information for a given input index.
 * we report all the inputs to application.
inputs table also * has sub device name for the each input */ static struct vpif_subdev_info *vpif_map_sub_device_to_input( struct channel_obj *ch, struct vpif_capture_config *vpif_cfg, int input_index, int *sub_device_index) { struct vpif_capture_chan_config *chan_cfg; struct vpif_subdev_info *subdev_info = NULL; const char *subdev_name = NULL; int i; vpif_dbg(2, debug, "vpif_map_sub_device_to_input\n"); chan_cfg = &vpif_cfg->chan_config[ch->channel_id]; /** * search through the inputs to find the sub device supporting * the input */ for (i = 0; i < chan_cfg->input_count; i++) { /* For each sub device, loop through input */ if (i == input_index) { subdev_name = chan_cfg->inputs[i].subdev_name; break; } } /* if reached maximum. return null */ if (i == chan_cfg->input_count || (NULL == subdev_name)) return subdev_info; /* loop through the sub device list to get the sub device info */ for (i = 0; i < vpif_cfg->subdev_count; i++) { subdev_info = &vpif_cfg->subdev_info[i]; if (!strcmp(subdev_info->name, subdev_name)) break; } if (i == vpif_cfg->subdev_count) return subdev_info; /* check if the sub device is registered */ if (NULL == vpif_obj.sd[i]) return NULL; *sub_device_index = i; return subdev_info; } /** * vpif_querystd() - querystd handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id * * This function is called to detect standard at the selected input */ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; int ret = 0; vpif_dbg(2, debug, "vpif_querystd\n"); /* Call querystd function of decoder device */ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, querystd, std_id); if (ret < 0) vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); return ret; } /** * vpif_g_std() - get STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct 
vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; vpif_dbg(2, debug, "vpif_g_std\n"); *std = ch->video.stdid; return 0; } /** * vpif_s_std() - set STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret = 0; vpif_dbg(2, debug, "vpif_s_std\n"); if (common->started) { vpif_err("streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; /* Call encoder subdevice function to set the standard */ ch->video.stdid = *std_id; ch->video.dv_preset = V4L2_DV_INVALID; memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings)); /* Get the information about the standard */ if (vpif_update_std_info(ch)) { vpif_err("Error getting the standard info\n"); return -EINVAL; } /* Configure the default format information */ vpif_config_format(ch); /* set standard in the sub device */ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core, s_std, *std_id); if (ret < 0) vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); return ret; } /** * vpif_enum_input() - ENUMINPUT handler * @file: file ptr * @priv: file handle * @input: ptr to input structure */ static int vpif_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; chan_cfg = &config->chan_config[ch->channel_id]; if (input->index >= chan_cfg->input_count) { vpif_dbg(1, debug, "Invalid input index\n"); return -EINVAL; } memcpy(input, 
&chan_cfg->inputs[input->index].input, sizeof(*input)); return 0; } /** * vpif_g_input() - Get INPUT handler * @file: file ptr * @priv: file handle * @index: ptr to input index */ static int vpif_g_input(struct file *file, void *priv, unsigned int *index) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct video_obj *vid_ch = &ch->video; *index = vid_ch->input_idx; return 0; } /** * vpif_s_input() - Set INPUT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_s_input(struct file *file, void *priv, unsigned int index) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct video_obj *vid_ch = &ch->video; struct vpif_subdev_info *subdev_info; int ret = 0, sd_index = 0; u32 input = 0, output = 0; chan_cfg = &config->chan_config[ch->channel_id]; if (common->started) { vpif_err("Streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; subdev_info = vpif_map_sub_device_to_input(ch, config, index, &sd_index); if (NULL == subdev_info) { vpif_dbg(1, debug, "couldn't lookup sub device for the input index\n"); return -EINVAL; } /* first setup input path from sub device to vpif */ if (config->setup_input_path) { ret = config->setup_input_path(ch->channel_id, subdev_info->name); if (ret < 0) { vpif_dbg(1, debug, "couldn't setup input path for the" " sub device %s, for input index %d\n", subdev_info->name, index); return ret; } } if (subdev_info->can_route) { input = subdev_info->input; output = subdev_info->output; ret = v4l2_subdev_call(vpif_obj.sd[sd_index], video, s_routing, input, output, 0); 
if (ret < 0) { vpif_dbg(1, debug, "Failed to set input\n"); return ret; } } vid_ch->input_idx = index; ch->curr_subdev_info = subdev_info; ch->curr_sd_index = sd_index; /* copy interface parameters to vpif */ ch->vpifparams.iface = subdev_info->vpif_if; /* update tvnorms from the sub device input info */ ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std; return ret; } /** * vpif_enum_fmt_vid_cap() - ENUM_FMT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; if (fmt->index != 0) { vpif_dbg(1, debug, "Invalid format index\n"); return -EINVAL; } /* Fill in the information about format */ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb"); fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "YCbCr4:2:2 YC Planar"); fmt->pixelformat = V4L2_PIX_FMT_YUV422P; } return 0; } /** * vpif_try_fmt_vid_cap() - TRY_FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; return vpif_check_format(ch, pixfmt, 1); } /** * vpif_g_fmt_vid_cap() - Set INPUT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if (common->fmt.type != fmt->type) return -EINVAL; /* Fill in the information about format */ *fmt = 
common->fmt;
	return 0;
}

/**
 * vpif_s_fmt_vid_cap() - Set FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 */
static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_pix_format *pixfmt;
	int ret = 0;

	vpif_dbg(2, debug, "%s\n", __func__);

	/* If streaming is started, return error */
	if (common->started) {
		vpif_dbg(1, debug, "Streaming is started\n");
		return -EBUSY;
	}

	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
		if (!fh->initialized) {
			vpif_dbg(1, debug, "Channel Busy\n");
			return -EBUSY;
		}
	}

	ret = v4l2_prio_check(&ch->prio, fh->prio);
	if (0 != ret)
		return ret;

	fh->initialized = 1;

	pixfmt = &fmt->fmt.pix;
	/* Check for valid field format */
	ret = vpif_check_format(ch, pixfmt, 0);
	if (ret)
		return ret;

	/* store the format in the channel object */
	common->fmt = *fmt;
	return 0;
}

/**
 * vpif_querycap() - QUERYCAP handler
 * @file: file ptr
 * @priv: file handle
 * @cap: ptr to v4l2_capability structure
 */
static int vpif_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
	strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info));
	strlcpy(cap->card, config->card_name, sizeof(cap->card));

	return 0;
}

/**
 * vpif_g_priority() - get priority handler
 * @file: file ptr
 * @priv: file handle
 * @prio: ptr to v4l2_priority structure
 */
static int vpif_g_priority(struct file *file, void *priv,
			   enum v4l2_priority *prio)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	*prio = v4l2_prio_max(&ch->prio);

	return 0;
}

/**
 * vpif_s_priority() - set priority handler
 * @file: file ptr
 * @priv: file handle
 * @p: priority to set for this file handle
 */
static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	return v4l2_prio_change(&ch->prio, &fh->prio, p);
}

/**
 * vpif_cropcap() - cropcap handler
 * @file: file ptr
 * @priv: file handle
 * @crop: ptr to v4l2_cropcap structure
 */
static int vpif_cropcap(struct file *file, void *priv,
			struct v4l2_cropcap *crop)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];

	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != crop->type)
		return -EINVAL;

	/* bounds and default rect both cover the full frame */
	crop->bounds.left = 0;
	crop->bounds.top = 0;
	crop->bounds.height = common->height;
	crop->bounds.width = common->width;
	crop->defrect = crop->bounds;
	return 0;
}

/**
 * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
 * @file: file ptr
 * @priv: file handle
 * @preset: input preset
 */
static int vpif_enum_dv_presets(struct file *file, void *priv,
				struct v4l2_dv_enum_preset *preset)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	/* forwarded entirely to the current sub device */
	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
				video, enum_dv_presets, preset);
}

/**
 * vpif_query_dv_preset() - QUERY_DV_PRESET handler
 * @file: file ptr
 * @priv: file handle
 * @preset: input preset
 */
static int vpif_query_dv_preset(struct file *file, void *priv,
				struct v4l2_dv_preset *preset)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
				video, query_dv_preset, preset);
}

/**
 * vpif_s_dv_preset() - S_DV_PRESET handler
 * @file: file ptr
 * @priv: file handle
 * @preset: input preset
 */
static int vpif_s_dv_preset(struct file *file, void *priv,
			    struct v4l2_dv_preset *preset)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	int ret = 0;

	if (common->started) {
		vpif_dbg(1, debug, "streaming in progress\n");
		return -EBUSY;
	}

	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
		if (!fh->initialized) {
			vpif_dbg(1, debug, "Channel Busy\n");
			return -EBUSY;
		}
	}

	ret = v4l2_prio_check(&ch->prio, fh->prio);
	if (ret)
		return ret;

	fh->initialized = 1;

	/* Call encoder subdevice function to set the standard */
	if (mutex_lock_interruptible(&common->lock))
		return -ERESTARTSYS;

	/* a DV preset invalidates any analog standard/custom timings */
	ch->video.dv_preset = preset->preset;
	ch->video.stdid = V4L2_STD_UNKNOWN;
	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));

	/* Get the information about the standard */
	if (vpif_update_std_info(ch)) {
		vpif_dbg(1, debug, "Error getting the standard info\n");
		ret = -EINVAL;
	} else {
		/* Configure the default format information */
		vpif_config_format(ch);

		ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
				       video, s_dv_preset, preset);
	}

	mutex_unlock(&common->lock);

	return ret;
}

/**
 * vpif_g_dv_preset() - G_DV_PRESET handler
 * @file: file ptr
 * @priv: file handle
 * @preset: input preset
 */
static int vpif_g_dv_preset(struct file *file, void *priv,
			    struct v4l2_dv_preset *preset)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	preset->preset = ch->video.dv_preset;

	return 0;
}

/**
 * vpif_s_dv_timings() - S_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 */
static int vpif_s_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
	struct video_obj *vid_ch = &ch->video;
	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
	int ret;

	/* only BT.656/BT.1120 style timings are handled */
	if (timings->type != V4L2_DV_BT_656_1120) {
		vpif_dbg(2, debug, "Timing type not defined\n");
		return -EINVAL;
	}

	/* Configure subdevice timings, if any */
	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
			       video, s_dv_timings, timings);
	if (ret == -ENOIOCTLCMD) {
		vpif_dbg(2, debug, "Custom DV timings not supported by "
"subdevice\n");
		return -EINVAL;
	}
	if (ret < 0) {
		vpif_dbg(2, debug, "Error setting custom DV timings\n");
		return ret;
	}

	/* all fields needed to derive the VPIF line numbers must be present */
	if (!(timings->bt.width && timings->bt.height &&
	      (timings->bt.hbackporch ||
	       timings->bt.hfrontporch ||
	       timings->bt.hsync) &&
	      timings->bt.vfrontporch &&
	      (timings->bt.vbackporch ||
	       timings->bt.vsync))) {
		vpif_dbg(2, debug, "Timings for width, height, "
			 "horizontal back porch, horizontal sync, "
			 "horizontal front porch, vertical back porch, "
			 "vertical sync and vertical back porch "
			 "must be defined\n");
		return -EINVAL;
	}

	*bt = timings->bt;

	/* Configure video port timings */
	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
		bt->hsync - 8;
	std_info->sav2eav = bt->width;

	std_info->l1 = 1;
	std_info->l3 = bt->vsync + bt->vbackporch + 1;

	if (bt->interlaced) {
		/* interlaced modes additionally need the il_* field timings */
		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
			std_info->vsize = bt->height * 2 +
				bt->vfrontporch + bt->vsync + bt->vbackporch +
				bt->il_vfrontporch + bt->il_vsync +
				bt->il_vbackporch;
			std_info->l5 = std_info->vsize/2 -
				(bt->vfrontporch - 1);
			std_info->l7 = std_info->vsize/2 + 1;
			std_info->l9 = std_info->l7 + bt->il_vsync +
				bt->il_vbackporch + 1;
			std_info->l11 = std_info->vsize -
				(bt->il_vfrontporch - 1);
		} else {
			vpif_dbg(2, debug, "Required timing values for "
				 "interlaced BT format missing\n");
			return -EINVAL;
		}
	} else {
		std_info->vsize = bt->height + bt->vfrontporch +
			bt->vsync + bt->vbackporch;
		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
	}
	strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
	std_info->width = bt->width;
	std_info->height = bt->height;
	std_info->frm_fmt = bt->interlaced ? 0 : 1;
	std_info->ycmux_mode = 0;
	std_info->capture_format = 0;
	std_info->vbi_supported = 0;
	std_info->hd_sd = 1;
	/* custom timings invalidate any analog standard or DV preset */
	std_info->stdid = 0;
	std_info->dv_preset = V4L2_DV_INVALID;

	vid_ch->stdid = 0;
	vid_ch->dv_preset = V4L2_DV_INVALID;

	return 0;
}

/**
 * vpif_g_dv_timings() - G_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 */
static int vpif_g_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct video_obj *vid_ch = &ch->video;
	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;

	timings->bt = *bt;

	return 0;
}

/*
 * vpif_g_chip_ident() - Identify the chip
 * @file: file ptr
 * @priv: file handle
 * @chip: chip identity
 *
 * Returns zero or -EINVAL if read operations fails.
 */
static int vpif_g_chip_ident(struct file *file, void *priv,
			     struct v4l2_dbg_chip_ident *chip)
{
	chip->ident = V4L2_IDENT_NONE;
	chip->revision = 0;
	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
	    chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
		vpif_dbg(2, debug, "match_type is invalid.\n");
		return -EINVAL;
	}

	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
					  g_chip_ident, chip);
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * vpif_dbg_g_register() - Read register
 * @file: file ptr
 * @priv: file handle
 * @reg: register to be read
 *
 * Debugging only
 * Returns zero or -EINVAL if read operations fails.
 */
static int vpif_dbg_g_register(struct file *file, void *priv,
			       struct v4l2_dbg_register *reg)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
				g_register, reg);
}

/*
 * vpif_dbg_s_register() - Write to register
 * @file: file ptr
 * @priv: file handle
 * @reg: register to be modified
 *
 * Debugging only
 * Returns zero or -EINVAL if write operations fails.
 */
static int vpif_dbg_s_register(struct file *file, void *priv,
			       struct v4l2_dbg_register *reg)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;

	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
				s_register, reg);
}
#endif

/*
 * vpif_log_status() - Status information
 * @filep: file ptr
 * @priv: file handle
 *
 * Returns zero.
 */
static int vpif_log_status(struct file *filep, void *priv)
{
	/* status for sub devices */
	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);

	return 0;
}

/* vpif capture ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
	.vidioc_querycap		= vpif_querycap,
	.vidioc_g_priority		= vpif_g_priority,
	.vidioc_s_priority		= vpif_s_priority,
	.vidioc_enum_fmt_vid_cap	= vpif_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= vpif_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= vpif_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= vpif_try_fmt_vid_cap,
	.vidioc_enum_input		= vpif_enum_input,
	.vidioc_s_input			= vpif_s_input,
	.vidioc_g_input			= vpif_g_input,
	.vidioc_reqbufs			= vpif_reqbufs,
	.vidioc_querybuf		= vpif_querybuf,
	.vidioc_querystd		= vpif_querystd,
	.vidioc_s_std			= vpif_s_std,
	.vidioc_g_std			= vpif_g_std,
	.vidioc_qbuf			= vpif_qbuf,
	.vidioc_dqbuf			= vpif_dqbuf,
	.vidioc_streamon		= vpif_streamon,
	.vidioc_streamoff		= vpif_streamoff,
	.vidioc_cropcap			= vpif_cropcap,
	.vidioc_enum_dv_presets		= vpif_enum_dv_presets,
	.vidioc_s_dv_preset		= vpif_s_dv_preset,
	.vidioc_g_dv_preset		= vpif_g_dv_preset,
	.vidioc_query_dv_preset		= vpif_query_dv_preset,
	.vidioc_s_dv_timings		= vpif_s_dv_timings,
	.vidioc_g_dv_timings		= vpif_g_dv_timings,
	.vidioc_g_chip_ident		= vpif_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register		= vpif_dbg_g_register,
	.vidioc_s_register		= vpif_dbg_s_register,
#endif
	.vidioc_log_status		= vpif_log_status,
};

/* vpif file operations */
static struct v4l2_file_operations vpif_fops = {
	.owner = THIS_MODULE,
	.open = vpif_open,
	.release = vpif_release,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vpif_mmap,
	.poll = vpif_poll
};
/* vpif video template */ static struct video_device vpif_video_template = { .name = "vpif", .fops = &vpif_fops, .minor = -1, .ioctl_ops = &vpif_ioctl_ops, }; /** * initialize_vpif() - Initialize vpif data structures * * Allocate memory for data structures and initialize them */ static int initialize_vpif(void) { int err = 0, i, j; int free_channel_objects_index; /* Default number of buffers should be 3 */ if ((ch0_numbuffers > 0) && (ch0_numbuffers < config_params.min_numbuffers)) ch0_numbuffers = config_params.min_numbuffers; if ((ch1_numbuffers > 0) && (ch1_numbuffers < config_params.min_numbuffers)) ch1_numbuffers = config_params.min_numbuffers; /* Set buffer size to min buffers size if it is invalid */ if (ch0_bufsize < config_params.min_bufsize[VPIF_CHANNEL0_VIDEO]) ch0_bufsize = config_params.min_bufsize[VPIF_CHANNEL0_VIDEO]; if (ch1_bufsize < config_params.min_bufsize[VPIF_CHANNEL1_VIDEO]) ch1_bufsize = config_params.min_bufsize[VPIF_CHANNEL1_VIDEO]; config_params.numbuffers[VPIF_CHANNEL0_VIDEO] = ch0_numbuffers; config_params.numbuffers[VPIF_CHANNEL1_VIDEO] = ch1_numbuffers; if (ch0_numbuffers) { config_params.channel_bufsize[VPIF_CHANNEL0_VIDEO] = ch0_bufsize; } if (ch1_numbuffers) { config_params.channel_bufsize[VPIF_CHANNEL1_VIDEO] = ch1_bufsize; } /* Allocate memory for six channel objects */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { vpif_obj.dev[i] = kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL); /* If memory allocation fails, return error */ if (!vpif_obj.dev[i]) { free_channel_objects_index = i; err = -ENOMEM; goto vpif_init_free_channel_objects; } } return 0; vpif_init_free_channel_objects: for (j = 0; j < free_channel_objects_index; j++) kfree(vpif_obj.dev[j]); return err; } /** * vpif_probe : This function probes the vpif capture driver * @pdev: platform device pointer * * This creates device entries by register itself to the V4L2 driver and * initializes fields of each channel objects */ static __init int vpif_probe(struct platform_device 
*pdev) { struct vpif_subdev_info *subdevdata; struct vpif_capture_config *config; int i, j, k, m, q, err; struct i2c_adapter *i2c_adap; struct channel_obj *ch; struct common_obj *common; struct video_device *vfd; struct resource *res; int subdev_count; vpif_dev = &pdev->dev; err = initialize_vpif(); if (err) { v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); return err; } err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); return err; } k = 0; while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { if (request_irq(i, vpif_channel_isr, IRQF_DISABLED, "DM646x_Capture", (void *)(&vpif_obj.dev[k]->channel_id))) { err = -EBUSY; i--; goto vpif_int_err; } } k++; } for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; /* Allocate memory for video device */ vfd = video_device_alloc(); if (NULL == vfd) { for (j = 0; j < i; j++) { ch = vpif_obj.dev[j]; video_device_release(ch->video_dev); } err = -ENOMEM; goto vpif_dev_alloc_err; } /* Initialize field of video device */ *vfd = vpif_video_template; vfd->v4l2_dev = &vpif_obj.v4l2_dev; vfd->release = video_device_release; snprintf(vfd->name, sizeof(vfd->name), "DM646x_VPIFCapture_DRIVER_V%s", VPIF_CAPTURE_VERSION); /* Set video_dev to the video device */ ch->video_dev = vfd; } for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) { ch = vpif_obj.dev[j]; ch->channel_id = j; common = &(ch->common[VPIF_VIDEO_INDEX]); spin_lock_init(&common->irqlock); mutex_init(&common->lock); ch->video_dev->lock = &common->lock; /* Initialize prio member of channel object */ v4l2_prio_init(&ch->prio); err = video_register_device(ch->video_dev, VFL_TYPE_GRABBER, (j ? 
1 : 0)); if (err) goto probe_out; video_set_drvdata(ch->video_dev, ch); } i2c_adap = i2c_get_adapter(1); config = pdev->dev.platform_data; subdev_count = config->subdev_count; vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count, GFP_KERNEL); if (vpif_obj.sd == NULL) { vpif_err("unable to allocate memory for subdevice pointers\n"); err = -ENOMEM; goto probe_out; } for (i = 0; i < subdev_count; i++) { subdevdata = &config->subdev_info[i]; vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, &subdevdata->board_info, NULL); if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); goto probe_subdev_out; } v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n", subdevdata->name); if (vpif_obj.sd[i]) vpif_obj.sd[i]->grp_id = 1 << i; } v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF capture driver initialized\n"); return 0; probe_subdev_out: /* free sub devices memory */ kfree(vpif_obj.sd); j = VPIF_CAPTURE_MAX_DEVICES; probe_out: for (k = 0; k < j; k++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[k]; /* Unregister video device */ video_unregister_device(ch->video_dev); } vpif_dev_alloc_err: k = VPIF_CAPTURE_MAX_DEVICES-1; res = platform_get_resource(pdev, IORESOURCE_IRQ, k); i = res->end; vpif_int_err: for (q = k; q >= 0; q--) { for (m = i; m >= (int)res->start; m--) free_irq(m, (void *)(&vpif_obj.dev[q]->channel_id)); res = platform_get_resource(pdev, IORESOURCE_IRQ, q-1); if (res) i = res->end; } v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } /** * vpif_remove() - driver remove handler * @device: ptr to platform device structure * * The vidoe device is unregistered */ static int vpif_remove(struct platform_device *device) { int i; struct channel_obj *ch; v4l2_device_unregister(&vpif_obj.v4l2_dev); /* un-register device */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; /* Unregister video device */ 
video_unregister_device(ch->video_dev); } return 0; } /** * vpif_suspend: vpif device suspend * * TODO: Add suspend code here */ static int vpif_suspend(struct device *dev) { return -1; } /** * vpif_resume: vpif device suspend * * TODO: Add resume code here */ static int vpif_resume(struct device *dev) { return -1; } static const struct dev_pm_ops vpif_dev_pm_ops = { .suspend = vpif_suspend, .resume = vpif_resume, }; static __refdata struct platform_driver vpif_driver = { .driver = { .name = "vpif_capture", .owner = THIS_MODULE, .pm = &vpif_dev_pm_ops, }, .probe = vpif_probe, .remove = vpif_remove, }; /** * vpif_init: initialize the vpif driver * * This function registers device and driver to the kernel, requests irq * handler and allocates memory * for channel objects */ static __init int vpif_init(void) { return platform_driver_register(&vpif_driver); } /** * vpif_cleanup : This function clean up the vpif capture resources * * This will un-registers device and driver to the kernel, frees * requested irq handler and de-allocates memory allocated for channel * objects. */ static void vpif_cleanup(void) { struct platform_device *pdev; struct resource *res; int irq_num; int i = 0; pdev = container_of(vpif_dev, struct platform_device, dev); while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) { for (irq_num = res->start; irq_num <= res->end; irq_num++) free_irq(irq_num, (void *)(&vpif_obj.dev[i]->channel_id)); i++; } platform_driver_unregister(&vpif_driver); kfree(vpif_obj.sd); for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) kfree(vpif_obj.dev[i]); } /* Function for module initialization and cleanup */ module_init(vpif_init); module_exit(vpif_cleanup);
gpl-2.0
vwmofo/android_kernel_htc_liberty-villec2
drivers/scsi/pas16.c
5145
18234
#define AUTOSENSE #define PSEUDO_DMA #define FOO #define UNSAFE /* Not unsafe for PAS16 -- use it */ #define PDEBUG 0 /* * This driver adapted from Drew Eckhardt's Trantor T128 driver * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * ( Based on T128 - DISTRIBUTION RELEASE 3. ) * * Modified to work with the Pro Audio Spectrum/Studio 16 * by John Weidman. * * * For more information, please consult * * Media Vision * (510) 770-8600 * (800) 348-7116 * * and * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * Options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512 * bytes at a time. Since interrupts are disabled by default during * these transfers, we might need this to give reasonable interrupt * service time if the transfer size gets too large. * * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance * increase compared to polled I/O. * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This * parameter comes from the NCR5380 code. It is NOT unsafe with * the PAS16 and you should use it. If you don't you will have * a problem with dropped characters during high speed * communications during SCSI transfers. If you really don't * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or * twiddle with the transfer size in the high level code. * * USLEEP - enable support for devices that don't disconnect. Untested. * * The card is detected and initialized in one of several ways : * 1. 
Autoprobe (default) - There are many different models of * the Pro Audio Spectrum/Studio 16, and I only have one of * them, so this may require a little tweaking. An interrupt * is triggered to autoprobe for the interrupt line. Note: * with the newer model boards, the interrupt is set via * software after reset using the default_irq for the * current board number. * * 2. With command line overrides - pas16=port,irq may be * used on the LILO command line to override the defaults. * * 3. With the PAS16_OVERRIDE compile time define. This is * specified as an array of address, irq tuples. Ie, for * one board at the default 0x388 address, IRQ10, I could say * -DPAS16_OVERRIDE={{0x388, 10}} * NOTE: Untested. * * 4. When included as a module, with arguments passed on the command line: * pas16_irq=xx the interrupt * pas16_addr=xx the port * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5" * * Note that if the override methods are used, place holders must * be specified for other boards in the system. * * * Configuration notes : * The current driver does not support interrupt sharing with the * sound portion of the card. If you use the same irq for the * scsi port and sound you will have problems. Either use * a different irq for the scsi port or don't use interrupts * for the scsi port. * * If you have problems with your card not being recognized, use * the LILO command line override. Try to get it recognized without * interrupts. Ie, for a board at the default 0x388 base port, * boot: linux pas16=0x388,255 * * SCSI_IRQ_NONE (255) should be specified for no interrupt, * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden * on the command line. 
* * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h) */ #include <linux/module.h> #include <linux/signal.h> #include <linux/proc_fs.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "pas16.h" #define AUTOPROBE_IRQ #include "NCR5380.h" static int pas_maxi = 0; static int pas_wmaxi = 0; static unsigned short pas16_addr = 0; static int pas16_irq = 0; static const int scsi_irq_translate[] = { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 }; /* The default_irqs array contains values used to set the irq into the * board via software (as must be done on newer model boards without * irq jumpers on the board). The first value in the array will be * assigned to logical board 0, the next to board 1, etc. */ static int default_irqs[] __initdata = { PAS16_DEFAULT_BOARD_1_IRQ, PAS16_DEFAULT_BOARD_2_IRQ, PAS16_DEFAULT_BOARD_3_IRQ, PAS16_DEFAULT_BOARD_4_IRQ }; static struct override { unsigned short io_port; int irq; } overrides #ifdef PAS16_OVERRIDE [] __initdata = PAS16_OVERRIDE; #else [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}}; #endif #define NO_OVERRIDES ARRAY_SIZE(overrides) static struct base { unsigned short io_port; int noauto; } bases[] __initdata = { {PAS16_DEFAULT_BASE_1, 0}, {PAS16_DEFAULT_BASE_2, 0}, {PAS16_DEFAULT_BASE_3, 0}, {PAS16_DEFAULT_BASE_4, 0} }; #define NO_BASES ARRAY_SIZE(bases) static const unsigned short pas16_offset[ 8 ] = { 0x1c00, /* OUTPUT_DATA_REG */ 0x1c01, /* INITIATOR_COMMAND_REG */ 0x1c02, /* MODE_REG */ 0x1c03, /* TARGET_COMMAND_REG */ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?) 
* START_DMA_TARGET_RECEIVE_REG wo */ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro, * START_DMA_INITIATOR_RECEIVE_REG wo */ }; /*----------------------------------------------------------------*/ /* the following will set the monitor border color (useful to find where something crashed or gets stuck at */ /* 1 = blue 2 = green 3 = cyan 4 = red 5 = magenta 6 = yellow 7 = white */ #if 1 #define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);} #else #define rtrc(i) {} #endif /* * Function : enable_board( int board_num, unsigned short port ) * * Purpose : set address in new model board * * Inputs : board_num - logical board number 0-3, port - base address * */ static void __init enable_board( int board_num, unsigned short port ) { outb( 0xbc + board_num, MASTER_ADDRESS_PTR ); outb( port >> 2, MASTER_ADDRESS_PTR ); } /* * Function : init_board( unsigned short port, int irq ) * * Purpose : Set the board up to handle the SCSI interface * * Inputs : port - base address of the board, * irq - irq to assign to the SCSI port * force_irq - set it even if it conflicts with sound driver * */ static void __init init_board( unsigned short io_port, int irq, int force_irq ) { unsigned int tmp; unsigned int pas_irq_code; /* Initialize the SCSI part of the board */ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_read( RESET_PARITY_INTERRUPT_REG ); /* Set the SCSI interrupt pointer without mucking up the sound * interrupt pointer in the same byte. */ pas_irq_code = ( irq < 16 ) ? 
scsi_irq_translate[irq] : 0; tmp = inb( io_port + IO_CONFIG_3 ); if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0 && !force_irq ) { printk( "pas16: WARNING: Can't use same irq as sound " "driver -- interrupts disabled\n" ); /* Set up the drive parameters, disable 5380 interrupts */ outb( 0x4d, io_port + SYS_CONFIG_4 ); } else { tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 ); outb( tmp, io_port + IO_CONFIG_3 ); /* Set up the drive parameters and enable 5380 interrupts */ outb( 0x6d, io_port + SYS_CONFIG_4 ); } } /* * Function : pas16_hw_detect( unsigned short board_num ) * * Purpose : determine if a pas16 board is present * * Inputs : board_num - logical board number ( 0 - 3 ) * * Returns : 0 if board not found, 1 if found. */ static int __init pas16_hw_detect( unsigned short board_num ) { unsigned char board_rev, tmp; unsigned short io_port = bases[ board_num ].io_port; /* See if we can find a PAS16 board at the address associated * with this logical board number. */ /* First, attempt to take a newer model board out of reset and * give it a base address. This shouldn't affect older boards. */ enable_board( board_num, io_port ); /* Now see if it looks like a PAS16 board */ board_rev = inb( io_port + PCB_CONFIG ); if( board_rev == 0xff ) return 0; tmp = board_rev ^ 0xe0; outb( tmp, io_port + PCB_CONFIG ); tmp = inb( io_port + PCB_CONFIG ); outb( board_rev, io_port + PCB_CONFIG ); if( board_rev != tmp ) /* Not a PAS-16 */ return 0; if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 ) return 0; /* return if no SCSI interface found */ /* Mediavision has some new model boards that return ID bits * that indicate a SCSI interface, but they're not (LMS). We'll * put in an additional test to try to weed them out. */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */ return 0; /* and try to read */ NCR5380_write( MODE_REG, 0x00 ); /* it back. 
*/ if( NCR5380_read( MODE_REG ) != 0x00 ) return 0; return 1; } /* * Function : pas16_setup(char *str, int *ints) * * Purpose : LILO command line initialization of the overrides array, * * Inputs : str - unused, ints - array of integer parameters with ints[0] * equal to the number of ints. * */ void __init pas16_setup(char *str, int *ints) { static int commandline_current = 0; int i; if (ints[0] != 2) printk("pas16_setup : usage pas16=io_port,irq\n"); else if (commandline_current < NO_OVERRIDES) { overrides[commandline_current].io_port = (unsigned short) ints[1]; overrides[commandline_current].irq = ints[2]; for (i = 0; i < NO_BASES; ++i) if (bases[i].io_port == (unsigned short) ints[1]) { bases[i].noauto = 1; break; } ++commandline_current; } } /* * Function : int pas16_detect(struct scsi_host_template * tpnt) * * Purpose : detects and initializes PAS16 controllers * that were autoprobed, overridden on the LILO command line, * or specified at compile time. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init pas16_detect(struct scsi_host_template * tpnt) { static int current_override = 0; static unsigned short current_base = 0; struct Scsi_Host *instance; unsigned short io_port; int count; tpnt->proc_name = "pas16"; tpnt->proc_info = &pas16_proc_info; if (pas16_addr != 0) { overrides[0].io_port = pas16_addr; /* * This is how we avoid seeing more than * one host adapter at the same I/O port. * Cribbed shamelessly from pas16_setup(). 
*/ for (count = 0; count < NO_BASES; ++count) if (bases[count].io_port == pas16_addr) { bases[count].noauto = 1; break; } } if (pas16_irq != 0) overrides[0].irq = pas16_irq; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { io_port = 0; if (overrides[current_override].io_port) { io_port = overrides[current_override].io_port; enable_board( current_override, io_port ); init_board( io_port, overrides[current_override].irq, 1 ); } else for (; !io_port && (current_base < NO_BASES); ++current_base) { #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port); #endif if ( !bases[current_base].noauto && pas16_hw_detect( current_base ) ){ io_port = bases[current_base].io_port; init_board( io_port, default_irqs[ current_base ], 0 ); #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : detected board.\n"); #endif } } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port); #endif if (!io_port) break; instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) break; instance->io_port = io_port; NCR5380_init(instance, 0); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; else instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } if (instance->irq == SCSI_IRQ_NONE) { printk("scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); /* Disable 5380 interrupts, leave drive params the same */ outb( 0x4d, io_port + SYS_CONFIG_4 ); outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 ); } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif printk("scsi%d : at 0x%04x", instance->host_no, (int) instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk (" interrupts disabled"); else printk (" irq %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE); NCR5380_print_options(instance); printk("\n"); ++current_override; ++count; } return count; } /* * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip) * * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for * the specified device / size. * * Inputs : size = size of device in sectors (512 bytes), dev = block device * major / minor, ip[] = {heads, sectors, cylinders} * * Returns : always 0 (success), initializes ip * */ /* * XXX Most SCSI boards use this mapping, I could be incorrect. Some one * using hard disks on a trantor should verify that this mapping corresponds * to that used by the BIOS / ASPI driver by running the linux fdisk program * and matching the H_C_S coordinates to what DOS uses. */ int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int * ip) { int size = capacity; ip[0] = 64; ip[1] = 32; ip[2] = size >> 11; /* I think I have it as /(32*64) */ if( ip[2] > 1024 ) { /* yes, >, not >= */ ip[0]=255; ip[1]=63; ip[2]=size/(63*255); if( ip[2] > 1023 ) /* yes >1023... 
*/ ip[2] = 1023; } return 0; } /* * Function : int NCR5380_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to * dst * * Inputs : dst = destination, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. */ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { register unsigned char *d = dst; register unsigned short reg = (unsigned short) (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) ++ii; insb( reg, d, i ); if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pread()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_maxi = ii; return 0; } /* * Function : int NCR5380_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from * src * * Inputs : src = source, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. 
*/ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { register unsigned char *s = src; register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) ++ii; outsb( reg, s, i ); if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_wmaxi = ii; return 0; } #include "NCR5380.c" static int pas16_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); return 0; } static struct scsi_host_template driver_template = { .name = "Pro Audio Spectrum-16 SCSI", .detect = pas16_detect, .release = pas16_release, .queuecommand = pas16_queue_command, .eh_abort_handler = pas16_abort, .eh_bus_reset_handler = pas16_bus_reset, .bios_param = pas16_biosparam, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c" #ifdef MODULE module_param(pas16_addr, ushort, 0); module_param(pas16_irq, int, 0); #endif MODULE_LICENSE("GPL");
gpl-2.0
chacox/chaco_9195_cm-13.0
drivers/mtd/maps/h720x-flash.c
5145
2747
/* * Flash memory access on Hynix GMS30C7201/HMS30C7202 based * evaluation boards * * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com> * 2003 Thomas Gleixner <tglx@linutronix.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <mach/hardware.h> #include <asm/io.h> static struct mtd_info *mymtd; static struct map_info h720x_map = { .name = "H720X", .bankwidth = 4, .size = H720X_FLASH_SIZE, .phys = H720X_FLASH_PHYS, }; static struct mtd_partition h720x_partitions[] = { { .name = "ArMon", .size = 0x00080000, .offset = 0, .mask_flags = MTD_WRITEABLE },{ .name = "Env", .size = 0x00040000, .offset = 0x00080000, .mask_flags = MTD_WRITEABLE },{ .name = "Kernel", .size = 0x00180000, .offset = 0x000c0000, .mask_flags = MTD_WRITEABLE },{ .name = "Ramdisk", .size = 0x00400000, .offset = 0x00240000, .mask_flags = MTD_WRITEABLE },{ .name = "jffs2", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND } }; #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) /* * Initialize FLASH support */ static int __init h720x_mtd_init(void) { h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); if (!h720x_map.virt) { printk(KERN_ERR "H720x-MTD: ioremap failed\n"); return -EIO; } simple_map_init(&h720x_map); // Probe for flash bankwidth 4 printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n"); mymtd = do_map_probe("cfi_probe", &h720x_map); if (!mymtd) { printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n"); // Probe for bankwidth 2 h720x_map.bankwidth = 2; mymtd = do_map_probe("cfi_probe", &h720x_map); } if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_parse_register(mymtd, NULL, NULL, h720x_partitions, NUM_PARTITIONS); return 0; } iounmap((void *)h720x_map.virt); return -ENXIO; } /* * Cleanup */ static void __exit h720x_mtd_cleanup(void) { if (mymtd) { mtd_device_unregister(mymtd); 
map_destroy(mymtd); } if (h720x_map.virt) { iounmap((void *)h720x_map.virt); h720x_map.virt = 0; } } module_init(h720x_mtd_init); module_exit(h720x_mtd_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
gpl-2.0
vinay94185vinay/android_kernel_sony_flamingo
drivers/gpu/ion/ion_system_mapper.c
6937
3192
/* * drivers/gpu/ion/ion_system_mapper.c * * Copyright (C) 2011 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/ion.h> #include <linux/memory.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ion_priv.h" /* * This mapper is valid for any heap that allocates memory that already has * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory, * pages obtained via io_remap, etc. */ static void *ion_kernel_mapper_map(struct ion_mapper *mapper, struct ion_buffer *buffer, struct ion_mapping **mapping) { if (!((1 << buffer->heap->type) & mapper->heap_mask)) { pr_err("%s: attempting to map an unsupported heap\n", __func__); return ERR_PTR(-EINVAL); } /* XXX REVISIT ME!!! 
*/ *((unsigned long *)mapping) = (unsigned long)buffer->priv; return buffer->priv; } static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, struct ion_buffer *buffer, struct ion_mapping *mapping) { if (!((1 << buffer->heap->type) & mapper->heap_mask)) pr_err("%s: attempting to unmap an unsupported heap\n", __func__); } static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, struct ion_buffer *buffer, struct ion_mapping *mapping) { if (!((1 << buffer->heap->type) & mapper->heap_mask)) { pr_err("%s: attempting to unmap an unsupported heap\n", __func__); return ERR_PTR(-EINVAL); } return buffer->priv; } static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, struct ion_buffer *buffer, struct vm_area_struct *vma, struct ion_mapping *mapping) { int ret; switch (buffer->heap->type) { case ION_HEAP_KMALLOC: { unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); break; } case ION_HEAP_VMALLOC: ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); break; default: pr_err("%s: attempting to map unsupported heap to userspace\n", __func__); return -EINVAL; } return ret; } static struct ion_mapper_ops ops = { .map = ion_kernel_mapper_map, .map_kernel = ion_kernel_mapper_map_kernel, .map_user = ion_kernel_mapper_map_user, .unmap = ion_kernel_mapper_unmap, }; struct ion_mapper *ion_system_mapper_create(void) { struct ion_mapper *mapper; mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); if (!mapper) return ERR_PTR(-ENOMEM); mapper->type = ION_SYSTEM_MAPPER; mapper->ops = &ops; mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); return mapper; } void ion_system_mapper_destroy(struct ion_mapper *mapper) { kfree(mapper); }
gpl-2.0
jgcaap/boeffla
drivers/usb/host/imx21-dbg.c
7449
12821
/* * Copyright (c) 2009 by Martin Fuzzey * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* this file is part of imx21-hcd.c */ #ifndef DEBUG static inline void create_debug_files(struct imx21 *imx21) { } static inline void remove_debug_files(struct imx21 *imx21) { } static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int status) {} static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) {} static inline void debug_etd_allocated(struct imx21 *imx21) {} static inline void debug_etd_freed(struct imx21 *imx21) {} static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {} static inline void debug_dmem_freed(struct imx21 *imx21, int size) {} static inline void debug_isoc_submitted(struct imx21 *imx21, int frame, struct td *td) {} static inline void debug_isoc_completed(struct imx21 *imx21, int frame, struct td *td, int cc, int len) {} #else #include <linux/debugfs.h> #include <linux/seq_file.h> static const char *dir_labels[] = { "TD 0", "OUT", "IN", "TD 1" }; static const char *speed_labels[] = { "Full", "Low" }; static const char 
*format_labels[] = { "Control", "ISO", "Bulk", "Interrupt" }; static inline struct debug_stats *stats_for_urb(struct imx21 *imx21, struct urb *urb) { return usb_pipeisoc(urb->pipe) ? &imx21->isoc_stats : &imx21->nonisoc_stats; } static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->submitted++; } static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st) { if (st) stats_for_urb(imx21, urb)->completed_failed++; else stats_for_urb(imx21, urb)->completed_ok++; } static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->unlinked++; } static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->queue_etd++; } static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->queue_dmem++; } static inline void debug_etd_allocated(struct imx21 *imx21) { imx21->etd_usage.maximum = max( ++(imx21->etd_usage.value), imx21->etd_usage.maximum); } static inline void debug_etd_freed(struct imx21 *imx21) { imx21->etd_usage.value--; } static inline void debug_dmem_allocated(struct imx21 *imx21, int size) { imx21->dmem_usage.value += size; imx21->dmem_usage.maximum = max( imx21->dmem_usage.value, imx21->dmem_usage.maximum); } static inline void debug_dmem_freed(struct imx21 *imx21, int size) { imx21->dmem_usage.value -= size; } static void debug_isoc_submitted(struct imx21 *imx21, int frame, struct td *td) { struct debug_isoc_trace *trace = &imx21->isoc_trace[ imx21->isoc_trace_index++]; imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace); trace->schedule_frame = td->frame; trace->submit_frame = frame; trace->request_len = td->len; trace->td = td; } static inline void debug_isoc_completed(struct imx21 *imx21, int frame, struct td *td, int cc, int len) { struct debug_isoc_trace *trace, *trace_failed; int i; int found = 0; trace = imx21->isoc_trace; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); 
i++, trace++) { if (trace->td == td) { trace->done_frame = frame; trace->done_len = len; trace->cc = cc; trace->td = NULL; found = 1; break; } } if (found && cc) { trace_failed = &imx21->isoc_trace_failed[ imx21->isoc_trace_index_failed++]; imx21->isoc_trace_index_failed %= ARRAY_SIZE( imx21->isoc_trace_failed); *trace_failed = *trace; } } static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize) { if (ep) snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)", ep->desc.bEndpointAddress, usb_endpoint_type(&ep->desc), ep); else snprintf(buf, bufsize, "none"); return buf; } static char *format_etd_dword0(u32 value, char *buf, int bufsize) { snprintf(buf, bufsize, "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d", value & 0x7F, (value >> DW0_ENDPNT) & 0x0F, dir_labels[(value >> DW0_DIRECT) & 0x03], speed_labels[(value >> DW0_SPEED) & 0x01], format_labels[(value >> DW0_FORMAT) & 0x03], (value >> DW0_HALTED) & 0x01); return buf; } static int debug_status_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; int etds_allocated = 0; int etds_sw_busy = 0; int etds_hw_busy = 0; int dmem_blocks = 0; int queued_for_etd = 0; int queued_for_dmem = 0; unsigned int dmem_bytes = 0; int i; struct etd_priv *etd; u32 etd_enable_mask; unsigned long flags; struct imx21_dmem_area *dmem; struct ep_priv *ep_priv; spin_lock_irqsave(&imx21->lock, flags); etd_enable_mask = readl(imx21->regs + USBH_ETDENSET); for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { if (etd->alloc) etds_allocated++; if (etd->urb) etds_sw_busy++; if (etd_enable_mask & (1<<i)) etds_hw_busy++; } list_for_each_entry(dmem, &imx21->dmem_list, list) { dmem_bytes += dmem->size; dmem_blocks++; } list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue) queued_for_etd++; list_for_each_entry(etd, &imx21->queue_for_dmem, queue) queued_for_dmem++; spin_unlock_irqrestore(&imx21->lock, flags); seq_printf(s, "Frame: %d\n" "ETDs allocated: %d/%d (max=%d)\n" "ETDs in use sw: %d\n" "ETDs 
in use hw: %d\n" "DMEM allocated: %d/%d (max=%d)\n" "DMEM blocks: %d\n" "Queued waiting for ETD: %d\n" "Queued waiting for DMEM: %d\n", readl(imx21->regs + USBH_FRMNUB) & 0xFFFF, etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum, etds_sw_busy, etds_hw_busy, dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum, dmem_blocks, queued_for_etd, queued_for_dmem); return 0; } static int debug_dmem_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct imx21_dmem_area *dmem; unsigned long flags; char ep_text[40]; spin_lock_irqsave(&imx21->lock, flags); list_for_each_entry(dmem, &imx21->dmem_list, list) seq_printf(s, "%04X: size=0x%X " "ep=%s\n", dmem->offset, dmem->size, format_ep(dmem->ep, ep_text, sizeof(ep_text))); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static int debug_etd_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct etd_priv *etd; char buf[60]; u32 dword; int i, j; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { int state = -1; struct urb_priv *urb_priv; if (etd->urb) { urb_priv = etd->urb->hcpriv; if (urb_priv) state = urb_priv->state; } seq_printf(s, "etd_num: %d\n" "ep: %s\n" "alloc: %d\n" "len: %d\n" "busy sw: %d\n" "busy hw: %d\n" "urb state: %d\n" "current urb: %p\n", i, format_ep(etd->ep, buf, sizeof(buf)), etd->alloc, etd->len, etd->urb != NULL, (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0, state, etd->urb); for (j = 0; j < 4; j++) { dword = etd_readl(imx21, i, j); switch (j) { case 0: format_etd_dword0(dword, buf, sizeof(buf)); break; case 2: snprintf(buf, sizeof(buf), "cc=0X%02X", dword >> DW2_COMPCODE); break; default: *buf = 0; break; } seq_printf(s, "dword %d: submitted=%08X cur=%08X [%s]\n", j, etd->submitted_dwords[j], dword, buf); } seq_printf(s, "\n"); } spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static void debug_statistics_show_one(struct seq_file *s, const char *name, struct 
debug_stats *stats) { seq_printf(s, "%s:\n" "submitted URBs: %lu\n" "completed OK: %lu\n" "completed failed: %lu\n" "unlinked: %lu\n" "queued for ETD: %lu\n" "queued for DMEM: %lu\n\n", name, stats->submitted, stats->completed_ok, stats->completed_failed, stats->unlinked, stats->queue_etd, stats->queue_dmem); } static int debug_statistics_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats); debug_statistics_show_one(s, "isoc", &imx21->isoc_stats); seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static void debug_isoc_show_one(struct seq_file *s, const char *name, int index, struct debug_isoc_trace *trace) { seq_printf(s, "%s %d:\n" "cc=0X%02X\n" "scheduled frame %d (%d)\n" "submitted frame %d (%d)\n" "completed frame %d (%d)\n" "requested length=%d\n" "completed length=%d\n\n", name, index, trace->cc, trace->schedule_frame, trace->schedule_frame & 0xFFFF, trace->submit_frame, trace->submit_frame & 0xFFFF, trace->done_frame, trace->done_frame & 0xFFFF, trace->request_len, trace->done_len); } static int debug_isoc_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct debug_isoc_trace *trace; unsigned long flags; int i; spin_lock_irqsave(&imx21->lock, flags); trace = imx21->isoc_trace_failed; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++) debug_isoc_show_one(s, "isoc failed", i, trace); trace = imx21->isoc_trace; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) debug_isoc_show_one(s, "isoc", i, trace); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static int debug_status_open(struct inode *inode, struct file *file) { return single_open(file, debug_status_show, inode->i_private); } static int debug_dmem_open(struct inode *inode, struct file *file) { return single_open(file, 
debug_dmem_show, inode->i_private); } static int debug_etd_open(struct inode *inode, struct file *file) { return single_open(file, debug_etd_show, inode->i_private); } static int debug_statistics_open(struct inode *inode, struct file *file) { return single_open(file, debug_statistics_show, inode->i_private); } static int debug_isoc_open(struct inode *inode, struct file *file) { return single_open(file, debug_isoc_show, inode->i_private); } static const struct file_operations debug_status_fops = { .open = debug_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_dmem_fops = { .open = debug_dmem_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_etd_fops = { .open = debug_etd_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_statistics_fops = { .open = debug_statistics_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_isoc_fops = { .open = debug_isoc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void create_debug_files(struct imx21 *imx21) { imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL); if (!imx21->debug_root) goto failed_create_rootdir; if (!debugfs_create_file("status", S_IRUGO, imx21->debug_root, imx21, &debug_status_fops)) goto failed_create; if (!debugfs_create_file("dmem", S_IRUGO, imx21->debug_root, imx21, &debug_dmem_fops)) goto failed_create; if (!debugfs_create_file("etd", S_IRUGO, imx21->debug_root, imx21, &debug_etd_fops)) goto failed_create; if (!debugfs_create_file("statistics", S_IRUGO, imx21->debug_root, imx21, &debug_statistics_fops)) goto failed_create; if (!debugfs_create_file("isoc", S_IRUGO, imx21->debug_root, imx21, &debug_isoc_fops)) goto failed_create; return; failed_create: debugfs_remove_recursive(imx21->debug_root); 
failed_create_rootdir: imx21->debug_root = NULL; } static void remove_debug_files(struct imx21 *imx21) { if (imx21->debug_root) { debugfs_remove_recursive(imx21->debug_root); imx21->debug_root = NULL; } } #endif
gpl-2.0
Yzoni/android_kernel_google_msm
drivers/usb/host/imx21-dbg.c
7449
12821
/* * Copyright (c) 2009 by Martin Fuzzey * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* this file is part of imx21-hcd.c */ #ifndef DEBUG static inline void create_debug_files(struct imx21 *imx21) { } static inline void remove_debug_files(struct imx21 *imx21) { } static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int status) {} static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) {} static inline void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) {} static inline void debug_etd_allocated(struct imx21 *imx21) {} static inline void debug_etd_freed(struct imx21 *imx21) {} static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {} static inline void debug_dmem_freed(struct imx21 *imx21, int size) {} static inline void debug_isoc_submitted(struct imx21 *imx21, int frame, struct td *td) {} static inline void debug_isoc_completed(struct imx21 *imx21, int frame, struct td *td, int cc, int len) {} #else #include <linux/debugfs.h> #include <linux/seq_file.h> static const char *dir_labels[] = { "TD 0", "OUT", "IN", "TD 1" }; static const char *speed_labels[] = { "Full", "Low" }; static const char 
*format_labels[] = { "Control", "ISO", "Bulk", "Interrupt" }; static inline struct debug_stats *stats_for_urb(struct imx21 *imx21, struct urb *urb) { return usb_pipeisoc(urb->pipe) ? &imx21->isoc_stats : &imx21->nonisoc_stats; } static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->submitted++; } static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st) { if (st) stats_for_urb(imx21, urb)->completed_failed++; else stats_for_urb(imx21, urb)->completed_ok++; } static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->unlinked++; } static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->queue_etd++; } static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) { stats_for_urb(imx21, urb)->queue_dmem++; } static inline void debug_etd_allocated(struct imx21 *imx21) { imx21->etd_usage.maximum = max( ++(imx21->etd_usage.value), imx21->etd_usage.maximum); } static inline void debug_etd_freed(struct imx21 *imx21) { imx21->etd_usage.value--; } static inline void debug_dmem_allocated(struct imx21 *imx21, int size) { imx21->dmem_usage.value += size; imx21->dmem_usage.maximum = max( imx21->dmem_usage.value, imx21->dmem_usage.maximum); } static inline void debug_dmem_freed(struct imx21 *imx21, int size) { imx21->dmem_usage.value -= size; } static void debug_isoc_submitted(struct imx21 *imx21, int frame, struct td *td) { struct debug_isoc_trace *trace = &imx21->isoc_trace[ imx21->isoc_trace_index++]; imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace); trace->schedule_frame = td->frame; trace->submit_frame = frame; trace->request_len = td->len; trace->td = td; } static inline void debug_isoc_completed(struct imx21 *imx21, int frame, struct td *td, int cc, int len) { struct debug_isoc_trace *trace, *trace_failed; int i; int found = 0; trace = imx21->isoc_trace; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); 
i++, trace++) { if (trace->td == td) { trace->done_frame = frame; trace->done_len = len; trace->cc = cc; trace->td = NULL; found = 1; break; } } if (found && cc) { trace_failed = &imx21->isoc_trace_failed[ imx21->isoc_trace_index_failed++]; imx21->isoc_trace_index_failed %= ARRAY_SIZE( imx21->isoc_trace_failed); *trace_failed = *trace; } } static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize) { if (ep) snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)", ep->desc.bEndpointAddress, usb_endpoint_type(&ep->desc), ep); else snprintf(buf, bufsize, "none"); return buf; } static char *format_etd_dword0(u32 value, char *buf, int bufsize) { snprintf(buf, bufsize, "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d", value & 0x7F, (value >> DW0_ENDPNT) & 0x0F, dir_labels[(value >> DW0_DIRECT) & 0x03], speed_labels[(value >> DW0_SPEED) & 0x01], format_labels[(value >> DW0_FORMAT) & 0x03], (value >> DW0_HALTED) & 0x01); return buf; } static int debug_status_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; int etds_allocated = 0; int etds_sw_busy = 0; int etds_hw_busy = 0; int dmem_blocks = 0; int queued_for_etd = 0; int queued_for_dmem = 0; unsigned int dmem_bytes = 0; int i; struct etd_priv *etd; u32 etd_enable_mask; unsigned long flags; struct imx21_dmem_area *dmem; struct ep_priv *ep_priv; spin_lock_irqsave(&imx21->lock, flags); etd_enable_mask = readl(imx21->regs + USBH_ETDENSET); for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { if (etd->alloc) etds_allocated++; if (etd->urb) etds_sw_busy++; if (etd_enable_mask & (1<<i)) etds_hw_busy++; } list_for_each_entry(dmem, &imx21->dmem_list, list) { dmem_bytes += dmem->size; dmem_blocks++; } list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue) queued_for_etd++; list_for_each_entry(etd, &imx21->queue_for_dmem, queue) queued_for_dmem++; spin_unlock_irqrestore(&imx21->lock, flags); seq_printf(s, "Frame: %d\n" "ETDs allocated: %d/%d (max=%d)\n" "ETDs in use sw: %d\n" "ETDs 
in use hw: %d\n" "DMEM allocated: %d/%d (max=%d)\n" "DMEM blocks: %d\n" "Queued waiting for ETD: %d\n" "Queued waiting for DMEM: %d\n", readl(imx21->regs + USBH_FRMNUB) & 0xFFFF, etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum, etds_sw_busy, etds_hw_busy, dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum, dmem_blocks, queued_for_etd, queued_for_dmem); return 0; } static int debug_dmem_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct imx21_dmem_area *dmem; unsigned long flags; char ep_text[40]; spin_lock_irqsave(&imx21->lock, flags); list_for_each_entry(dmem, &imx21->dmem_list, list) seq_printf(s, "%04X: size=0x%X " "ep=%s\n", dmem->offset, dmem->size, format_ep(dmem->ep, ep_text, sizeof(ep_text))); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static int debug_etd_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct etd_priv *etd; char buf[60]; u32 dword; int i, j; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { int state = -1; struct urb_priv *urb_priv; if (etd->urb) { urb_priv = etd->urb->hcpriv; if (urb_priv) state = urb_priv->state; } seq_printf(s, "etd_num: %d\n" "ep: %s\n" "alloc: %d\n" "len: %d\n" "busy sw: %d\n" "busy hw: %d\n" "urb state: %d\n" "current urb: %p\n", i, format_ep(etd->ep, buf, sizeof(buf)), etd->alloc, etd->len, etd->urb != NULL, (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0, state, etd->urb); for (j = 0; j < 4; j++) { dword = etd_readl(imx21, i, j); switch (j) { case 0: format_etd_dword0(dword, buf, sizeof(buf)); break; case 2: snprintf(buf, sizeof(buf), "cc=0X%02X", dword >> DW2_COMPCODE); break; default: *buf = 0; break; } seq_printf(s, "dword %d: submitted=%08X cur=%08X [%s]\n", j, etd->submitted_dwords[j], dword, buf); } seq_printf(s, "\n"); } spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static void debug_statistics_show_one(struct seq_file *s, const char *name, struct 
debug_stats *stats) { seq_printf(s, "%s:\n" "submitted URBs: %lu\n" "completed OK: %lu\n" "completed failed: %lu\n" "unlinked: %lu\n" "queued for ETD: %lu\n" "queued for DMEM: %lu\n\n", name, stats->submitted, stats->completed_ok, stats->completed_failed, stats->unlinked, stats->queue_etd, stats->queue_dmem); } static int debug_statistics_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; unsigned long flags; spin_lock_irqsave(&imx21->lock, flags); debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats); debug_statistics_show_one(s, "isoc", &imx21->isoc_stats); seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static void debug_isoc_show_one(struct seq_file *s, const char *name, int index, struct debug_isoc_trace *trace) { seq_printf(s, "%s %d:\n" "cc=0X%02X\n" "scheduled frame %d (%d)\n" "submitted frame %d (%d)\n" "completed frame %d (%d)\n" "requested length=%d\n" "completed length=%d\n\n", name, index, trace->cc, trace->schedule_frame, trace->schedule_frame & 0xFFFF, trace->submit_frame, trace->submit_frame & 0xFFFF, trace->done_frame, trace->done_frame & 0xFFFF, trace->request_len, trace->done_len); } static int debug_isoc_show(struct seq_file *s, void *v) { struct imx21 *imx21 = s->private; struct debug_isoc_trace *trace; unsigned long flags; int i; spin_lock_irqsave(&imx21->lock, flags); trace = imx21->isoc_trace_failed; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++) debug_isoc_show_one(s, "isoc failed", i, trace); trace = imx21->isoc_trace; for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) debug_isoc_show_one(s, "isoc", i, trace); spin_unlock_irqrestore(&imx21->lock, flags); return 0; } static int debug_status_open(struct inode *inode, struct file *file) { return single_open(file, debug_status_show, inode->i_private); } static int debug_dmem_open(struct inode *inode, struct file *file) { return single_open(file, 
debug_dmem_show, inode->i_private); } static int debug_etd_open(struct inode *inode, struct file *file) { return single_open(file, debug_etd_show, inode->i_private); } static int debug_statistics_open(struct inode *inode, struct file *file) { return single_open(file, debug_statistics_show, inode->i_private); } static int debug_isoc_open(struct inode *inode, struct file *file) { return single_open(file, debug_isoc_show, inode->i_private); } static const struct file_operations debug_status_fops = { .open = debug_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_dmem_fops = { .open = debug_dmem_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_etd_fops = { .open = debug_etd_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_statistics_fops = { .open = debug_statistics_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations debug_isoc_fops = { .open = debug_isoc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void create_debug_files(struct imx21 *imx21) { imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL); if (!imx21->debug_root) goto failed_create_rootdir; if (!debugfs_create_file("status", S_IRUGO, imx21->debug_root, imx21, &debug_status_fops)) goto failed_create; if (!debugfs_create_file("dmem", S_IRUGO, imx21->debug_root, imx21, &debug_dmem_fops)) goto failed_create; if (!debugfs_create_file("etd", S_IRUGO, imx21->debug_root, imx21, &debug_etd_fops)) goto failed_create; if (!debugfs_create_file("statistics", S_IRUGO, imx21->debug_root, imx21, &debug_statistics_fops)) goto failed_create; if (!debugfs_create_file("isoc", S_IRUGO, imx21->debug_root, imx21, &debug_isoc_fops)) goto failed_create; return; failed_create: debugfs_remove_recursive(imx21->debug_root); 
failed_create_rootdir: imx21->debug_root = NULL; } static void remove_debug_files(struct imx21 *imx21) { if (imx21->debug_root) { debugfs_remove_recursive(imx21->debug_root); imx21->debug_root = NULL; } } #endif
gpl-2.0
zhaochengw/A810S_CAF_KERNEL_3.4
fs/hfs/brec.c
10009
13782
/* * linux/fs/hfs/brec.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handle individual btree records */ #include "btree.h" static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd); static int hfs_brec_update_parent(struct hfs_find_data *fd); static int hfs_btree_inc_height(struct hfs_btree *tree); /* Get the length and offset of the given record in the given node */ u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) { __be16 retval[2]; u16 dataoff; dataoff = node->tree->node_size - (rec + 2) * 2; hfs_bnode_read(node, retval, dataoff, 4); *off = be16_to_cpu(retval[1]); return be16_to_cpu(retval[0]) - *off; } /* Get the length of the key from a keyed record */ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) { u16 retval, recoff; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) return 0; if ((node->type == HFS_NODE_INDEX) && !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { if (node->tree->attributes & HFS_TREE_BIGKEYS) retval = node->tree->max_key_len + 2; else retval = node->tree->max_key_len + 1; } else { recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; if (node->tree->attributes & HFS_TREE_BIGKEYS) { retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { printk(KERN_ERR "hfs: keylen %d too large\n", retval); retval = 0; } } else { retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; if (retval > node->tree->max_key_len + 1) { printk(KERN_ERR "hfs: keylen %d too large\n", retval); retval = 0; } } } return retval; } int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node; int size, key_len, rec; int data_off, end_off; int idx_rec_off, data_rec_off, end_rec_off; __be32 cnid; tree = fd->tree; if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); fd->bnode = hfs_bnode_find(tree, 
tree->leaf_head); if (IS_ERR(fd->bnode)) return PTR_ERR(fd->bnode); fd->record = -1; } new_node = NULL; key_len = (fd->search_key->key_len | 1) + 1; again: /* new record idx and complete record size */ rec = fd->record + 1; size = key_len + entry_len; node = fd->bnode; hfs_bnode_dump(node); /* get last offset */ end_rec_off = tree->node_size - (node->num_recs + 1) * 2; end_off = hfs_bnode_read_u16(node, end_rec_off); end_rec_off -= 2; dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); if (size > end_rec_off - end_off) { if (new_node) panic("not enough room!\n"); new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); goto again; } if (node->type == HFS_NODE_LEAF) { tree->leaf_count++; mark_inode_dirty(tree->inode); } node->num_recs++; /* write new last offset */ hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_bnode_write_u16(node, end_rec_off, end_off + size); data_off = end_off; data_rec_off = end_rec_off + 2; idx_rec_off = tree->node_size - (rec + 1) * 2; if (idx_rec_off == data_rec_off) goto skip; /* move all following entries */ do { data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_bnode_write_u16(node, data_rec_off, data_off + size); data_rec_off += 2; } while (data_rec_off < idx_rec_off); /* move data away */ hfs_bnode_move(node, data_off + size, data_off, end_off - data_off); skip: hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); if (new_node) { /* update parent key if we inserted a key * at the start of the first node */ if (!rec && new_node != node) hfs_brec_update_parent(fd); hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index data entry */ cnid = cpu_to_be32(new_node->this); entry = &cnid; entry_len = sizeof(cnid); /* get index 
key */ hfs_bnode_read_key(new_node, fd->search_key, 14); __hfs_brec_find(fd->bnode, fd); hfs_bnode_put(new_node); new_node = NULL; if (tree->attributes & HFS_TREE_VARIDXKEYS) key_len = fd->search_key->key_len + 1; else { fd->search_key->key_len = tree->max_key_len; key_len = tree->max_key_len + 1; } goto again; } if (!rec) hfs_brec_update_parent(fd); return 0; } int hfs_brec_remove(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *parent; int end_off, rec_off, data_off, size; tree = fd->tree; node = fd->bnode; again: rec_off = tree->node_size - (fd->record + 2) * 2; end_off = tree->node_size - (node->num_recs + 1) * 2; if (node->type == HFS_NODE_LEAF) { tree->leaf_count--; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); if (!--node->num_recs) { hfs_bnode_unlink(node); if (!node->parent) return 0; parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); hfs_bnode_put(node); node = fd->bnode = parent; __hfs_brec_find(node, fd); goto again; } hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); if (rec_off == end_off) goto skip; size = fd->keylength + fd->entrylength; do { data_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_write_u16(node, rec_off + 2, data_off - size); rec_off -= 2; } while (rec_off >= end_off); /* fill hole */ hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, data_off - fd->keyoffset - size); skip: hfs_bnode_dump(node); if (!fd->record) hfs_brec_update_parent(fd); return 0; } static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *next_node; struct hfs_bnode_desc node_desc; int num_recs, new_rec_off, new_off, old_rec_off; int data_start, data_end, size; tree = fd->tree; node = fd->bnode; new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) return new_node; hfs_bnode_get(node); 
dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", node->this, new_node->this, node->next); new_node->next = node->next; new_node->prev = node->this; new_node->parent = node->parent; new_node->type = node->type; new_node->height = node->height; if (node->next) next_node = hfs_bnode_find(tree, node->next); else next_node = NULL; if (IS_ERR(next_node)) { hfs_bnode_put(node); hfs_bnode_put(new_node); return next_node; } size = tree->node_size / 2 - node->num_recs * 2 - 14; old_rec_off = tree->node_size - 4; num_recs = 1; for (;;) { data_start = hfs_bnode_read_u16(node, old_rec_off); if (data_start > size) break; old_rec_off -= 2; if (++num_recs < node->num_recs) continue; /* panic? */ hfs_bnode_put(node); hfs_bnode_put(new_node); if (next_node) hfs_bnode_put(next_node); return ERR_PTR(-ENOSPC); } if (fd->record + 1 < num_recs) { /* new record is in the lower half, * so leave some more space there */ old_rec_off += 2; num_recs--; data_start = hfs_bnode_read_u16(node, old_rec_off); } else { hfs_bnode_put(node); hfs_bnode_get(new_node); fd->bnode = new_node; fd->record -= num_recs; fd->keyoffset -= data_start - 14; fd->entryoffset -= data_start - 14; } new_node->num_recs = node->num_recs - num_recs; node->num_recs = num_recs; new_rec_off = tree->node_size - 2; new_off = 14; size = data_start - new_off; num_recs = new_node->num_recs; data_end = data_start; while (num_recs) { hfs_bnode_write_u16(new_node, new_rec_off, new_off); old_rec_off -= 2; new_rec_off -= 2; data_end = hfs_bnode_read_u16(node, old_rec_off); new_off = data_end - size; num_recs--; } hfs_bnode_write_u16(new_node, new_rec_off, new_off); hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); /* update new bnode header */ node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, 
&node_desc, 0, sizeof(node_desc)); /* update previous bnode header */ node->next = new_node->this; hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); node_desc.next = cpu_to_be32(node->next); node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); /* update next bnode header */ if (next_node) { next_node->prev = new_node->this; hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); node_desc.prev = cpu_to_be32(next_node->prev); hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_put(next_node); } else if (node->this == tree->leaf_tail) { /* if there is no next node, this might be the new tail */ tree->leaf_tail = new_node->this; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_bnode_dump(new_node); hfs_bnode_put(node); return new_node; } static int hfs_brec_update_parent(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *parent; int newkeylen, diff; int rec, rec_off, end_rec_off; int start_off, end_off; tree = fd->tree; node = fd->bnode; new_node = NULL; if (!node->parent) return 0; again: parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); hfs_bnode_dump(parent); rec = fd->record; /* size difference between old and new key */ if (tree->attributes & HFS_TREE_VARIDXKEYS) newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; else fd->keylength = newkeylen = tree->max_key_len + 1; dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; diff = newkeylen - fd->keylength; if (!diff) goto skip; if (diff > 0) { end_off = hfs_bnode_read_u16(parent, end_rec_off); if (end_rec_off - end_off < diff) { printk(KERN_DEBUG "hfs: splitting index node...\n"); fd->bnode = parent; new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); parent = 
fd->bnode; rec = fd->record; rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; } } end_off = start_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, start_off + diff); start_off -= 4; /* move previous cnid too */ while (rec_off > end_rec_off) { rec_off -= 2; end_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, end_off + diff); } hfs_bnode_move(parent, start_off + diff, start_off, end_off - start_off); skip: hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) hfs_bnode_write_u8(parent, fd->keyoffset, newkeylen - 1); hfs_bnode_dump(parent); hfs_bnode_put(node); node = parent; if (new_node) { __be32 cnid; fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index key and entry */ hfs_bnode_read_key(new_node, fd->search_key, 14); cnid = cpu_to_be32(new_node->this); __hfs_brec_find(fd->bnode, fd); hfs_brec_insert(fd, &cnid, sizeof(cnid)); hfs_bnode_put(fd->bnode); hfs_bnode_put(new_node); if (!rec) { if (new_node == node) goto out; /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } } if (!rec && node->parent) goto again; out: fd->bnode = node; return 0; } static int hfs_btree_inc_height(struct hfs_btree *tree) { struct hfs_bnode *node, *new_node; struct hfs_bnode_desc node_desc; int key_size, rec; __be32 cnid; node = NULL; if (tree->root) { node = hfs_bnode_find(tree, tree->root); if (IS_ERR(node)) return PTR_ERR(node); } new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) { hfs_bnode_put(node); return PTR_ERR(new_node); } tree->root = new_node->this; if (!tree->depth) { tree->leaf_head = tree->leaf_tail = new_node->this; new_node->type = HFS_NODE_LEAF; new_node->num_recs = 0; } else { new_node->type = HFS_NODE_INDEX; new_node->num_recs = 1; } new_node->parent = 0; new_node->next = 0; new_node->prev = 0; new_node->height = ++tree->depth; node_desc.next = 
cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); rec = tree->node_size - 2; hfs_bnode_write_u16(new_node, rec, 14); if (node) { /* insert old root idx into new root */ node->parent = tree->root; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_size = hfs_bnode_read_u8(node, 14) + 1; else key_size = tree->max_key_len + 1; hfs_bnode_copy(new_node, 14, node, 14, key_size); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { key_size = tree->max_key_len + 1; hfs_bnode_write_u8(new_node, 14, tree->max_key_len); } key_size = (key_size + 1) & -2; cnid = cpu_to_be32(node->this); hfs_bnode_write(new_node, &cnid, 14 + key_size, 4); rec -= 2; hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4); hfs_bnode_put(node); } hfs_bnode_put(new_node); mark_inode_dirty(tree->inode); return 0; }
gpl-2.0
de-wolff/caf_device_kernel_msm
drivers/misc/sgi-gru/grufault.c
10265
23749
/* * SN Platform GRU Driver * * FAULT HANDLER FOR GRU DETECTED TLB MISSES * * This file contains code that handles TLB misses within the GRU. * These misses are reported either via interrupts or user polling of * the user CB. * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/device.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/security.h> #include <linux/prefetch.h> #include <asm/pgtable.h> #include "gru.h" #include "grutables.h" #include "grulib.h" #include "gru_instructions.h" #include <asm/uv/uv_hub.h> /* Return codes for vtop functions */ #define VTOP_SUCCESS 0 #define VTOP_INVALID -1 #define VTOP_RETRY -2 /* * Test if a physical address is a valid GRU GSEG address */ static inline int is_gru_paddr(unsigned long paddr) { return paddr >= gru_start_paddr && paddr < gru_end_paddr; } /* * Find the vma of a GRU segment. Caller must hold mmap_sem. 
*/ struct vm_area_struct *gru_find_vma(unsigned long vaddr) { struct vm_area_struct *vma; vma = find_vma(current->mm, vaddr); if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) return vma; return NULL; } /* * Find and lock the gts that contains the specified user vaddr. * * Returns: * - *gts with the mmap_sem locked for read and the GTS locked. * - NULL if vaddr invalid OR is not a valid GSEG vaddr. */ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct gru_thread_state *gts = NULL; down_read(&mm->mmap_sem); vma = gru_find_vma(vaddr); if (vma) gts = gru_find_thread_state(vma, TSID(vaddr, vma)); if (gts) mutex_lock(&gts->ts_ctxlock); else up_read(&mm->mmap_sem); return gts; } static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct gru_thread_state *gts = ERR_PTR(-EINVAL); down_write(&mm->mmap_sem); vma = gru_find_vma(vaddr); if (!vma) goto err; gts = gru_alloc_thread_state(vma, TSID(vaddr, vma)); if (IS_ERR(gts)) goto err; mutex_lock(&gts->ts_ctxlock); downgrade_write(&mm->mmap_sem); return gts; err: up_write(&mm->mmap_sem); return gts; } /* * Unlock a GTS that was previously locked with gru_find_lock_gts(). */ static void gru_unlock_gts(struct gru_thread_state *gts) { mutex_unlock(&gts->ts_ctxlock); up_read(&current->mm->mmap_sem); } /* * Set a CB.istatus to active using a user virtual address. This must be done * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY. * If the line is evicted, the status may be lost. The in-cache update * is necessary to prevent the user from seeing a stale cb.istatus that will * change as soon as the TFH restart is complete. Races may cause an * occasional failure to clear the cb.istatus, but that is ok. 
*/ static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk) { if (cbk) { cbk->istatus = CBS_ACTIVE; } } /* * Read & clear a TFM * * The GRU has an array of fault maps. A map is private to a cpu * Only one cpu will be accessing a cpu's fault map. * * This function scans the cpu-private fault map & clears all bits that * are set. The function returns a bitmap that indicates the bits that * were cleared. Note that sense the maps may be updated asynchronously by * the GRU, atomic operations must be used to clear bits. */ static void get_clear_fault_map(struct gru_state *gru, struct gru_tlb_fault_map *imap, struct gru_tlb_fault_map *dmap) { unsigned long i, k; struct gru_tlb_fault_map *tfm; tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id()); prefetchw(tfm); /* Helps on hardware, required for emulator */ for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) { k = tfm->fault_bits[i]; if (k) k = xchg(&tfm->fault_bits[i], 0UL); imap->fault_bits[i] = k; k = tfm->done_bits[i]; if (k) k = xchg(&tfm->done_bits[i], 0UL); dmap->fault_bits[i] = k; } /* * Not functionally required but helps performance. (Required * on emulator) */ gru_flush_cache(tfm); } /* * Atomic (interrupt context) & non-atomic (user context) functions to * convert a vaddr into a physical address. The size of the page * is returned in pageshift. * returns: * 0 - successful * < 0 - error code * 1 - (atomic only) try again in non-atomic context */ static int non_atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, int write, unsigned long *paddr, int *pageshift) { struct page *page; #ifdef CONFIG_HUGETLB_PAGE *pageshift = is_vm_hugetlb_page(vma) ? 
HPAGE_SHIFT : PAGE_SHIFT; #else *pageshift = PAGE_SHIFT; #endif if (get_user_pages (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) return -EFAULT; *paddr = page_to_phys(page); put_page(page); return 0; } /* * atomic_pte_lookup * * Convert a user virtual address to a physical address * Only supports Intel large pages (2MB only) on x86_64. * ZZZ - hugepage support is incomplete * * NOTE: mmap_sem is already held on entry to this function. This * guarantees existence of the page tables. */ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, int write, unsigned long *paddr, int *pageshift) { pgd_t *pgdp; pmd_t *pmdp; pud_t *pudp; pte_t pte; pgdp = pgd_offset(vma->vm_mm, vaddr); if (unlikely(pgd_none(*pgdp))) goto err; pudp = pud_offset(pgdp, vaddr); if (unlikely(pud_none(*pudp))) goto err; pmdp = pmd_offset(pudp, vaddr); if (unlikely(pmd_none(*pmdp))) goto err; #ifdef CONFIG_X86_64 if (unlikely(pmd_large(*pmdp))) pte = *(pte_t *) pmdp; else #endif pte = *pte_offset_kernel(pmdp, vaddr); if (unlikely(!pte_present(pte) || (write && (!pte_write(pte) || !pte_dirty(pte))))) return 1; *paddr = pte_pfn(pte) << PAGE_SHIFT; #ifdef CONFIG_HUGETLB_PAGE *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT; #else *pageshift = PAGE_SHIFT; #endif return 0; err: return 1; } static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr, int write, int atomic, unsigned long *gpa, int *pageshift) { struct mm_struct *mm = gts->ts_mm; struct vm_area_struct *vma; unsigned long paddr; int ret, ps; vma = find_vma(mm, vaddr); if (!vma) goto inval; /* * Atomic lookup is faster & usually works even if called in non-atomic * context. 
*/ rmb(); /* Must/check ms_range_active before loading PTEs */ ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps); if (ret) { if (atomic) goto upm; if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps)) goto inval; } if (is_gru_paddr(paddr)) goto inval; paddr = paddr & ~((1UL << ps) - 1); *gpa = uv_soc_phys_ram_to_gpa(paddr); *pageshift = ps; return VTOP_SUCCESS; inval: return VTOP_INVALID; upm: return VTOP_RETRY; } /* * Flush a CBE from cache. The CBE is clean in the cache. Dirty the * CBE cacheline so that the line will be written back to home agent. * Otherwise the line may be silently dropped. This has no impact * except on performance. */ static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe) { if (unlikely(cbe)) { cbe->cbrexecstatus = 0; /* make CL dirty */ gru_flush_cache(cbe); } } /* * Preload the TLB with entries that may be required. Currently, preloading * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to * the end of the bcopy tranfer, whichever is smaller. */ static void gru_preload_tlb(struct gru_state *gru, struct gru_thread_state *gts, int atomic, unsigned long fault_vaddr, int asid, int write, unsigned char tlb_preload_count, struct gru_tlb_fault_handle *tfh, struct gru_control_block_extended *cbe) { unsigned long vaddr = 0, gpa; int ret, pageshift; if (cbe->opccpy != OP_BCOPY) return; if (fault_vaddr == cbe->cbe_baddr0) vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1; else if (fault_vaddr == cbe->cbe_baddr1) vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1; fault_vaddr &= PAGE_MASK; vaddr &= PAGE_MASK; vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE); while (vaddr > fault_vaddr) { ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write, GRU_PAGESIZE(pageshift))) return; gru_dbg(grudev, "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n", atomic ? 
"atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid, write, pageshift, gpa); vaddr -= PAGE_SIZE; STAT(tlb_preload_page); } } /* * Drop a TLB entry into the GRU. The fault is described by info in an TFH. * Input: * cb Address of user CBR. Null if not running in user context * Return: * 0 = dropin, exception, or switch to UPM successful * 1 = range invalidate active * < 0 = error code * */ static int gru_try_dropin(struct gru_state *gru, struct gru_thread_state *gts, struct gru_tlb_fault_handle *tfh, struct gru_instruction_bits *cbk) { struct gru_control_block_extended *cbe = NULL; unsigned char tlb_preload_count = gts->ts_tlb_preload_count; int pageshift = 0, asid, write, ret, atomic = !cbk, indexway; unsigned long gpa = 0, vaddr = 0; /* * NOTE: The GRU contains magic hardware that eliminates races between * TLB invalidates and TLB dropins. If an invalidate occurs * in the window between reading the TFH and the subsequent TLB dropin, * the dropin is ignored. This eliminates the need for additional locks. */ /* * Prefetch the CBE if doing TLB preloading */ if (unlikely(tlb_preload_count)) { cbe = gru_tfh_to_cbe(tfh); prefetchw(cbe); } /* * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. * Might be a hardware race OR a stupid user. Ignore FMM because FMM * is a transient state. */ if (tfh->status != TFHSTATUS_EXCEPTION) { gru_flush_cache(tfh); sync_core(); if (tfh->status != TFHSTATUS_EXCEPTION) goto failnoexception; STAT(tfh_stale_on_fault); } if (tfh->state == TFHSTATE_IDLE) goto failidle; if (tfh->state == TFHSTATE_MISS_FMM && cbk) goto failfmm; write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; vaddr = tfh->missvaddr; asid = tfh->missasid; indexway = tfh->indexway; if (asid == 0) goto failnoasid; rmb(); /* TFH must be cache resident before reading ms_range_active */ /* * TFH is cache resident - at least briefly. Fail the dropin * if a range invalidate is active. 
*/ if (atomic_read(&gts->ts_gms->ms_range_active)) goto failactive; ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); if (ret == VTOP_INVALID) goto failinval; if (ret == VTOP_RETRY) goto failupm; if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); if (atomic || !gru_update_cch(gts)) { gts->ts_force_cch_reload = 1; goto failupm; } } if (unlikely(cbe) && pageshift == PAGE_SHIFT) { gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe); gru_flush_cache_cbe(cbe); } gru_cb_set_istatus_active(cbk); gts->ustats.tlbdropin++; tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, GRU_PAGESIZE(pageshift)); gru_dbg(grudev, "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x," " rw %d, ps %d, gpa 0x%lx\n", atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid, indexway, write, pageshift, gpa); STAT(tlb_dropin); return 0; failnoasid: /* No asid (delayed unload). */ STAT(tlb_dropin_fail_no_asid); gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); if (!cbk) tfh_user_polling_mode(tfh); else gru_flush_cache(tfh); gru_flush_cache_cbe(cbe); return -EAGAIN; failupm: /* Atomic failure switch CBR to UPM */ tfh_user_polling_mode(tfh); gru_flush_cache_cbe(cbe); STAT(tlb_dropin_fail_upm); gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); return 1; failfmm: /* FMM state on UPM call */ gru_flush_cache(tfh); gru_flush_cache_cbe(cbe); STAT(tlb_dropin_fail_fmm); gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); return 0; failnoexception: /* TFH status did not show exception pending */ gru_flush_cache(tfh); gru_flush_cache_cbe(cbe); if (cbk) gru_flush_cache(cbk); STAT(tlb_dropin_fail_no_exception); gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state); return 0; failidle: /* TFH state was idle - no miss pending */ gru_flush_cache(tfh); gru_flush_cache_cbe(cbe); if 
(cbk) gru_flush_cache(cbk); STAT(tlb_dropin_fail_idle); gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state); return 0; failinval: /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */ tfh_exception(tfh); gru_flush_cache_cbe(cbe); STAT(tlb_dropin_fail_invalid); gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr); return -EFAULT; failactive: /* Range invalidate active. Switch to UPM iff atomic */ if (!cbk) tfh_user_polling_mode(tfh); else gru_flush_cache(tfh); gru_flush_cache_cbe(cbe); STAT(tlb_dropin_fail_range_active); gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n", tfh, vaddr); return 1; } /* * Process an external interrupt from the GRU. This interrupt is * caused by a TLB miss. * Note that this is the interrupt handler that is registered with linux * interrupt handlers. */ static irqreturn_t gru_intr(int chiplet, int blade) { struct gru_state *gru; struct gru_tlb_fault_map imap, dmap; struct gru_thread_state *gts; struct gru_tlb_fault_handle *tfh = NULL; struct completion *cmp; int cbrnum, ctxnum; STAT(intr); gru = &gru_base[blade]->bs_grus[chiplet]; if (!gru) { dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n", raw_smp_processor_id(), chiplet); return IRQ_NONE; } get_clear_fault_map(gru, &imap, &dmap); gru_dbg(grudev, "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n", smp_processor_id(), chiplet, gru->gs_gid, imap.fault_bits[0], imap.fault_bits[1], dmap.fault_bits[0], dmap.fault_bits[1]); for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { STAT(intr_cbr); cmp = gru->gs_blade->bs_async_wq; if (cmp) complete(cmp); gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", gru->gs_gid, cbrnum, cmp ? 
cmp->done : -1); } for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { STAT(intr_tfh); tfh = get_tfh_by_index(gru, cbrnum); prefetchw(tfh); /* Helps on hdw, required for emulator */ /* * When hardware sets a bit in the faultmap, it implicitly * locks the GRU context so that it cannot be unloaded. * The gts cannot change until a TFH start/writestart command * is issued. */ ctxnum = tfh->ctxnum; gts = gru->gs_gts[ctxnum]; /* Spurious interrupts can cause this. Ignore. */ if (!gts) { STAT(intr_spurious); continue; } /* * This is running in interrupt context. Trylock the mmap_sem. * If it fails, retry the fault in user context. */ gts->ustats.fmm_tlbmiss++; if (!gts->ts_force_cch_reload && down_read_trylock(&gts->ts_mm->mmap_sem)) { gru_try_dropin(gru, gts, tfh, NULL); up_read(&gts->ts_mm->mmap_sem); } else { tfh_user_polling_mode(tfh); STAT(intr_mm_lock_failed); } } return IRQ_HANDLED; } irqreturn_t gru0_intr(int irq, void *dev_id) { return gru_intr(0, uv_numa_blade_id()); } irqreturn_t gru1_intr(int irq, void *dev_id) { return gru_intr(1, uv_numa_blade_id()); } irqreturn_t gru_intr_mblade(int irq, void *dev_id) { int blade; for_each_possible_blade(blade) { if (uv_blade_nr_possible_cpus(blade)) continue; gru_intr(0, blade); gru_intr(1, blade); } return IRQ_HANDLED; } static int gru_user_dropin(struct gru_thread_state *gts, struct gru_tlb_fault_handle *tfh, void *cb) { struct gru_mm_struct *gms = gts->ts_gms; int ret; gts->ustats.upm_tlbmiss++; while (1) { wait_event(gms->ms_wait_queue, atomic_read(&gms->ms_range_active) == 0); prefetchw(tfh); /* Helps on hdw, required for emulator */ ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb); if (ret <= 0) return ret; STAT(call_os_wait_queue); } } /* * This interface is called as a result of a user detecting a "call OS" bit * in a user CB. Normally means that a TLB fault has occurred. 
* cb - user virtual address of the CB */ int gru_handle_user_call_os(unsigned long cb) { struct gru_tlb_fault_handle *tfh; struct gru_thread_state *gts; void *cbk; int ucbnum, cbrnum, ret = -EINVAL; STAT(call_os); /* sanity check the cb pointer */ ucbnum = get_cb_number((void *)cb); if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) return -EINVAL; gts = gru_find_lock_gts(cb); if (!gts) return -EINVAL; gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts); if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; gru_check_context_placement(gts); /* * CCH may contain stale data if ts_force_cch_reload is set. */ if (gts->ts_gru && gts->ts_force_cch_reload) { gts->ts_force_cch_reload = 0; gru_update_cch(gts); } ret = -EAGAIN; cbrnum = thread_cbr_number(gts, ucbnum); if (gts->ts_gru) { tfh = get_tfh_by_index(gts->ts_gru, cbrnum); cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr, gts->ts_ctxnum, ucbnum); ret = gru_user_dropin(gts, tfh, cbk); } exit: gru_unlock_gts(gts); return ret; } /* * Fetch the exception detail information for a CB that terminated with * an exception. */ int gru_get_exception_detail(unsigned long arg) { struct control_block_extended_exc_detail excdet; struct gru_control_block_extended *cbe; struct gru_thread_state *gts; int ucbnum, cbrnum, ret; STAT(user_exception); if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet))) return -EFAULT; gts = gru_find_lock_gts(excdet.cb); if (!gts) return -EINVAL; gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? 
gts->ts_gru->gs_gid : -1, gts); ucbnum = get_cb_number((void *)excdet.cb); if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { ret = -EINVAL; } else if (gts->ts_gru) { cbrnum = thread_cbr_number(gts, ucbnum); cbe = get_cbe_by_index(gts->ts_gru, cbrnum); gru_flush_cache(cbe); /* CBE not coherent */ sync_core(); /* make sure we are have current data */ excdet.opc = cbe->opccpy; excdet.exopc = cbe->exopccpy; excdet.ecause = cbe->ecause; excdet.exceptdet0 = cbe->idef1upd; excdet.exceptdet1 = cbe->idef3upd; excdet.cbrstate = cbe->cbrstate; excdet.cbrexecstatus = cbe->cbrexecstatus; gru_flush_cache_cbe(cbe); ret = 0; } else { ret = -EAGAIN; } gru_unlock_gts(gts); gru_dbg(grudev, "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, " "exdet0 0x%lx, exdet1 0x%x\n", excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0, excdet.exceptdet1); if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet))) ret = -EFAULT; return ret; } /* * User request to unload a context. Content is saved for possible reload. 
*/ static int gru_unload_all_contexts(void) { struct gru_thread_state *gts; struct gru_state *gru; int gid, ctxnum; if (!capable(CAP_SYS_ADMIN)) return -EPERM; foreach_gid(gid) { gru = GID_TO_GRU(gid); spin_lock(&gru->gs_lock); for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { gts = gru->gs_gts[ctxnum]; if (gts && mutex_trylock(&gts->ts_ctxlock)) { spin_unlock(&gru->gs_lock); gru_unload_context(gts, 1); mutex_unlock(&gts->ts_ctxlock); spin_lock(&gru->gs_lock); } } spin_unlock(&gru->gs_lock); } return 0; } int gru_user_unload_context(unsigned long arg) { struct gru_thread_state *gts; struct gru_unload_context_req req; STAT(user_unload_context); if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; gru_dbg(grudev, "gseg 0x%lx\n", req.gseg); if (!req.gseg) return gru_unload_all_contexts(); gts = gru_find_lock_gts(req.gseg); if (!gts) return -EINVAL; if (gts->ts_gru) gru_unload_context(gts, 1); gru_unlock_gts(gts); return 0; } /* * User request to flush a range of virtual addresses from the GRU TLB * (Mainly for testing). */ int gru_user_flush_tlb(unsigned long arg) { struct gru_thread_state *gts; struct gru_flush_tlb_req req; struct gru_mm_struct *gms; STAT(user_flush_tlb); if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg, req.vaddr, req.len); gts = gru_find_lock_gts(req.gseg); if (!gts) return -EINVAL; gms = gts->ts_gms; gru_unlock_gts(gts); gru_flush_tlb_range(gms, req.vaddr, req.len); return 0; } /* * Fetch GSEG statisticss */ long gru_get_gseg_statistics(unsigned long arg) { struct gru_thread_state *gts; struct gru_get_gseg_statistics_req req; if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; /* * The library creates arrays of contexts for threaded programs. * If no gts exists in the array, the context has never been used & all * statistics are implicitly 0. 
*/ gts = gru_find_lock_gts(req.gseg); if (gts) { memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats)); gru_unlock_gts(gts); } else { memset(&req.stats, 0, sizeof(gts->ustats)); } if (copy_to_user((void __user *)arg, &req, sizeof(req))) return -EFAULT; return 0; } /* * Register the current task as the user of the GSEG slice. * Needed for TLB fault interrupt targeting. */ int gru_set_context_option(unsigned long arg) { struct gru_thread_state *gts; struct gru_set_context_option_req req; int ret = 0; STAT(set_context_option); if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); gts = gru_find_lock_gts(req.gseg); if (!gts) { gts = gru_alloc_locked_gts(req.gseg); if (IS_ERR(gts)) return PTR_ERR(gts); } switch (req.op) { case sco_blade_chiplet: /* Select blade/chiplet for GRU context */ if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] || req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) { ret = -EINVAL; } else { gts->ts_user_blade_id = req.val1; gts->ts_user_chiplet_id = req.val0; gru_check_context_placement(gts); } break; case sco_gseg_owner: /* Register the current task as the GSEG owner */ gts->ts_tgid_owner = current->tgid; break; case sco_cch_req_slice: /* Set the CCH slice option */ gts->ts_cch_req_slice = req.val1 & 3; break; default: ret = -EINVAL; } gru_unlock_gts(gts); return ret; }
gpl-2.0
nnvt/android_kernel_oneplus_msm8974
fs/nls/nls_cp850.c
12569
13400
/* * linux/fs/nls/nls_cp850.c * * Charset cp850 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9, 0x00ff, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x00d7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x00ae, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00c1, 0x00c2, 0x00c0, 
0x00a9, 0x2563, 0x2551, 0x2557, 0x255d, 0x00a2, 0x00a5, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x00e3, 0x00c3, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x00a4, /* 0xd0*/ 0x00f0, 0x00d0, 0x00ca, 0x00cb, 0x00c8, 0x0131, 0x00cd, 0x00ce, 0x00cf, 0x2518, 0x250c, 0x2588, 0x2584, 0x00a6, 0x00cc, 0x2580, /* 0xe0*/ 0x00d3, 0x00df, 0x00d4, 0x00d2, 0x00f5, 0x00d5, 0x00b5, 0x00fe, 0x00de, 0x00da, 0x00db, 0x00d9, 0x00fd, 0x00dd, 0x00af, 0x00b4, /* 0xf0*/ 0x00ad, 0x00b1, 0x2017, 0x00be, 0x00b6, 0x00a7, 0x00f7, 0x00b8, 0x00b0, 0x00a8, 0x00b7, 0x00b9, 0x00b3, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, /* 
0xa0-0xa7 */ 0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, /* 0xb0-0xb7 */ 0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, /* 0xb8-0xbf */ 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */ 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, /* 0xc8-0xcf */ 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, /* 0xd0-0xd7 */ 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, /* 0xd8-0xdf */ 0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */ 0xd0, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x97, 0xa3, 0x96, 0x81, 0xec, 0xe7, 0x98, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const 
unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, /* 0x10-0x17 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */ 0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */ 0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */ 0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 
0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0x85, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd0, 0x88, 0x89, 0x8a, 0xd5, 0xa1, 0x8c, /* 0xd0-0xd7 */ 0x8b, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0x8d, 0xdf, /* 0xd8-0xdf */ 0xa2, 0xe1, 0x93, 0x95, 0xe4, 0xe4, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe7, 0xa3, 0x96, 0x97, 0xec, 0xec, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 
0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xb7, 0x8f, 0x80, /* 0x80-0x87 */ 0xd2, 0xd3, 0xd4, 0xd8, 0xd7, 0xde, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0xe2, 0x99, 0xe3, 0xea, 0xeb, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0xb5, 0xd6, 0xe0, 0xe9, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd1, 0xd1, 0xd2, 0xd3, 0xd4, 0x49, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe5, 0xe5, 0x00, 0xe8, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xed, 0xed, 0xee, 0xef, /* 0xe8-0xef 
*/ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp850", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp850(void) { return register_nls(&table); } static void __exit exit_nls_cp850(void) { unregister_nls(&table); } module_init(init_nls_cp850) module_exit(exit_nls_cp850) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
broonie/sound-2.6
drivers/staging/winbond/linux/wb35reg.c
26
21847
#include "sysdef.h" extern void phy_calibration_winbond(hw_data_t *phw_data, u32 frequency); // TRUE : read command process successfully // FALSE : register not support // RegisterNo : start base // pRegisterData : data point // NumberOfData : number of register data // Flag : AUTO_INCREMENT - RegisterNo will auto increment 4 // NO_INCREMENT - Function will write data into the same register unsigned char Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; PURB pUrb = NULL; PREG_QUEUE pRegQueue = NULL; u16 UrbSize; struct usb_ctrlrequest *dr; u16 i, DataSize = NumberOfData*4; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; // Trying to use burst write function if use new hardware UrbSize = sizeof(REG_QUEUE) + DataSize + sizeof(struct usb_ctrlrequest); OS_MEMORY_ALLOC( (void* *)&pRegQueue, UrbSize ); pUrb = wb_usb_alloc_urb(0); if( pUrb && pRegQueue ) { pRegQueue->DIRECT = 2;// burst write register pRegQueue->INDEX = RegisterNo; pRegQueue->pBuffer = (u32 *)((u8 *)pRegQueue + sizeof(REG_QUEUE)); memcpy( pRegQueue->pBuffer, pRegisterData, DataSize ); //the function for reversing register data from little endian to big endian for( i=0; i<NumberOfData ; i++ ) pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] ); dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE) + DataSize); dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE; dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment dr->wIndex = cpu_to_le16( RegisterNo ); dr->wLength = cpu_to_le16( DataSize ); pRegQueue->Next = NULL; pRegQueue->pUsbReq = dr; pRegQueue->pUrb = pUrb; spin_lock_irq( &pWb35Reg->EP0VM_spin_lock ); if (pWb35Reg->pRegFirst == NULL) pWb35Reg->pRegFirst = pRegQueue; else pWb35Reg->pRegLast->Next = pRegQueue; pWb35Reg->pRegLast = pRegQueue; 
spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); // Start EP0VM Wb35Reg_EP0VM_start(pHwData); return TRUE; } else { if (pUrb) usb_free_urb(pUrb); if (pRegQueue) kfree(pRegQueue); return FALSE; } return FALSE; } void Wb35Reg_Update(phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; switch (RegisterNo) { case 0x3b0: pWb35Reg->U1B0 = RegisterValue; break; case 0x3bc: pWb35Reg->U1BC_LEDConfigure = RegisterValue; break; case 0x400: pWb35Reg->D00_DmaControl = RegisterValue; break; case 0x800: pWb35Reg->M00_MacControl = RegisterValue; break; case 0x804: pWb35Reg->M04_MulticastAddress1 = RegisterValue; break; case 0x808: pWb35Reg->M08_MulticastAddress2 = RegisterValue; break; case 0x824: pWb35Reg->M24_MacControl = RegisterValue; break; case 0x828: pWb35Reg->M28_MacControl = RegisterValue; break; case 0x82c: pWb35Reg->M2C_MacControl = RegisterValue; break; case 0x838: pWb35Reg->M38_MacControl = RegisterValue; break; case 0x840: pWb35Reg->M40_MacControl = RegisterValue; break; case 0x844: pWb35Reg->M44_MacControl = RegisterValue; break; case 0x848: pWb35Reg->M48_MacControl = RegisterValue; break; case 0x84c: pWb35Reg->M4C_MacStatus = RegisterValue; break; case 0x860: pWb35Reg->M60_MacControl = RegisterValue; break; case 0x868: pWb35Reg->M68_MacControl = RegisterValue; break; case 0x870: pWb35Reg->M70_MacControl = RegisterValue; break; case 0x874: pWb35Reg->M74_MacControl = RegisterValue; break; case 0x878: pWb35Reg->M78_ERPInformation = RegisterValue; break; case 0x87C: pWb35Reg->M7C_MacControl = RegisterValue; break; case 0x880: pWb35Reg->M80_MacControl = RegisterValue; break; case 0x884: pWb35Reg->M84_MacControl = RegisterValue; break; case 0x888: pWb35Reg->M88_MacControl = RegisterValue; break; case 0x898: pWb35Reg->M98_MacControl = RegisterValue; break; case 0x100c: pWb35Reg->BB0C = RegisterValue; break; case 0x102c: pWb35Reg->BB2C = RegisterValue; break; case 0x1030: pWb35Reg->BB30 = RegisterValue; break; case 0x103c: 
pWb35Reg->BB3C = RegisterValue; break; case 0x1048: pWb35Reg->BB48 = RegisterValue; break; case 0x104c: pWb35Reg->BB4C = RegisterValue; break; case 0x1050: pWb35Reg->BB50 = RegisterValue; break; case 0x1054: pWb35Reg->BB54 = RegisterValue; break; case 0x1058: pWb35Reg->BB58 = RegisterValue; break; case 0x105c: pWb35Reg->BB5C = RegisterValue; break; case 0x1060: pWb35Reg->BB60 = RegisterValue; break; } } // TRUE : read command process successfully // FALSE : register not support unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; int ret = -1; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; RegisterValue = cpu_to_le32(RegisterValue); // update the register by send usb message------------------------------------ pWb35Reg->SyncIoPause = 1; // 20060717.5 Wait until EP0VM stop while (pWb35Reg->EP0vm_state != VM_STOP) OS_SLEEP(10000); // Sync IoCallDriver pWb35Reg->EP0vm_state = VM_RUNNING; ret = usb_control_msg( pHwData->WbUsb.udev, usb_sndctrlpipe( pHwData->WbUsb.udev, 0 ), 0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0,RegisterNo, &RegisterValue, 4, HZ*100 ); pWb35Reg->EP0vm_state = VM_STOP; pWb35Reg->SyncIoPause = 0; Wb35Reg_EP0VM_start(pHwData); if (ret < 0) { #ifdef _PE_REG_DUMP_ WBDEBUG(("EP0 Write register usb message sending error\n")); #endif pHwData->SurpriseRemove = 1; // 20060704.2 return FALSE; } return TRUE; } // TRUE : read command process successfully // FALSE : register not support unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; struct usb_ctrlrequest *dr; PURB pUrb = NULL; PREG_QUEUE pRegQueue = NULL; u16 UrbSize; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; // update the register by send urb request------------------------------------ UrbSize = sizeof(REG_QUEUE) + sizeof(struct usb_ctrlrequest); OS_MEMORY_ALLOC( (void* *)&pRegQueue, UrbSize ); pUrb = 
wb_usb_alloc_urb(0); if (pUrb && pRegQueue) { pRegQueue->DIRECT = 1;// burst write register pRegQueue->INDEX = RegisterNo; pRegQueue->VALUE = cpu_to_le32(RegisterValue); pRegQueue->RESERVED_VALID = FALSE; dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE)); dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode dr->wValue = cpu_to_le16(0x0); dr->wIndex = cpu_to_le16(RegisterNo); dr->wLength = cpu_to_le16(4); // Enter the sending queue pRegQueue->Next = NULL; pRegQueue->pUsbReq = dr; pRegQueue->pUrb = pUrb; spin_lock_irq(&pWb35Reg->EP0VM_spin_lock ); if (pWb35Reg->pRegFirst == NULL) pWb35Reg->pRegFirst = pRegQueue; else pWb35Reg->pRegLast->Next = pRegQueue; pWb35Reg->pRegLast = pRegQueue; spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); // Start EP0VM Wb35Reg_EP0VM_start(pHwData); return TRUE; } else { if (pUrb) usb_free_urb(pUrb); kfree(pRegQueue); return FALSE; } } //This command will be executed with a user defined value. When it completes, //this value is useful. For example, hal_set_current_channel will use it. // TRUE : read command process successfully // FALSE : register not support unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue, s8 *pValue, s8 Len) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; struct usb_ctrlrequest *dr; PURB pUrb = NULL; PREG_QUEUE pRegQueue = NULL; u16 UrbSize; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; // update the register by send urb request------------------------------------ UrbSize = sizeof(REG_QUEUE) + sizeof(struct usb_ctrlrequest); OS_MEMORY_ALLOC((void* *) &pRegQueue, UrbSize ); pUrb = wb_usb_alloc_urb(0); if (pUrb && pRegQueue) { pRegQueue->DIRECT = 1;// burst write register pRegQueue->INDEX = RegisterNo; pRegQueue->VALUE = cpu_to_le32(RegisterValue); //NOTE : Users must guarantee the size of value will not exceed the buffer size. 
memcpy(pRegQueue->RESERVED, pValue, Len); pRegQueue->RESERVED_VALID = TRUE; dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE)); dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode dr->wValue = cpu_to_le16(0x0); dr->wIndex = cpu_to_le16(RegisterNo); dr->wLength = cpu_to_le16(4); // Enter the sending queue pRegQueue->Next = NULL; pRegQueue->pUsbReq = dr; pRegQueue->pUrb = pUrb; spin_lock_irq (&pWb35Reg->EP0VM_spin_lock ); if( pWb35Reg->pRegFirst == NULL ) pWb35Reg->pRegFirst = pRegQueue; else pWb35Reg->pRegLast->Next = pRegQueue; pWb35Reg->pRegLast = pRegQueue; spin_unlock_irq ( &pWb35Reg->EP0VM_spin_lock ); // Start EP0VM Wb35Reg_EP0VM_start(pHwData); return TRUE; } else { if (pUrb) usb_free_urb(pUrb); kfree(pRegQueue); return FALSE; } } // TRUE : read command process successfully // FALSE : register not support // pRegisterValue : It must be a resident buffer due to asynchronous read register. 
unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; u32 * pltmp = pRegisterValue; int ret = -1; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; // Read the register by send usb message------------------------------------ pWb35Reg->SyncIoPause = 1; // 20060717.5 Wait until EP0VM stop while (pWb35Reg->EP0vm_state != VM_STOP) OS_SLEEP(10000); pWb35Reg->EP0vm_state = VM_RUNNING; ret = usb_control_msg( pHwData->WbUsb.udev, usb_rcvctrlpipe(pHwData->WbUsb.udev, 0), 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN, 0x0, RegisterNo, pltmp, 4, HZ*100 ); *pRegisterValue = cpu_to_le32(*pltmp); pWb35Reg->EP0vm_state = VM_STOP; Wb35Reg_Update( pHwData, RegisterNo, *pRegisterValue ); pWb35Reg->SyncIoPause = 0; Wb35Reg_EP0VM_start( pHwData ); if (ret < 0) { #ifdef _PE_REG_DUMP_ WBDEBUG(("EP0 Read register usb message sending error\n")); #endif pHwData->SurpriseRemove = 1; // 20060704.2 return FALSE; } return TRUE; } // TRUE : read command process successfully // FALSE : register not support // pRegisterValue : It must be a resident buffer due to asynchronous read register. 
unsigned char Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; struct usb_ctrlrequest * dr; PURB pUrb; PREG_QUEUE pRegQueue; u16 UrbSize; // Module shutdown if (pHwData->SurpriseRemove) return FALSE; // update the variable by send Urb to read register ------------------------------------ UrbSize = sizeof(REG_QUEUE) + sizeof(struct usb_ctrlrequest); OS_MEMORY_ALLOC( (void* *)&pRegQueue, UrbSize ); pUrb = wb_usb_alloc_urb(0); if( pUrb && pRegQueue ) { pRegQueue->DIRECT = 0;// read register pRegQueue->INDEX = RegisterNo; pRegQueue->pBuffer = pRegisterValue; dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE)); dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN; dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode dr->wValue = cpu_to_le16(0x0); dr->wIndex = cpu_to_le16 (RegisterNo); dr->wLength = cpu_to_le16 (4); // Enter the sending queue pRegQueue->Next = NULL; pRegQueue->pUsbReq = dr; pRegQueue->pUrb = pUrb; spin_lock_irq ( &pWb35Reg->EP0VM_spin_lock ); if( pWb35Reg->pRegFirst == NULL ) pWb35Reg->pRegFirst = pRegQueue; else pWb35Reg->pRegLast->Next = pRegQueue; pWb35Reg->pRegLast = pRegQueue; spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); // Start EP0VM Wb35Reg_EP0VM_start( pHwData ); return TRUE; } else { if (pUrb) usb_free_urb( pUrb ); kfree(pRegQueue); return FALSE; } } void Wb35Reg_EP0VM_start( phw_data_t pHwData ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; if (OS_ATOMIC_INC( pHwData->Adapter, &pWb35Reg->RegFireCount) == 1) { pWb35Reg->EP0vm_state = VM_RUNNING; Wb35Reg_EP0VM(pHwData); } else OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount ); } void Wb35Reg_EP0VM(phw_data_t pHwData ) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; PURB pUrb; struct usb_ctrlrequest *dr; u32 * pBuffer; int ret = -1; PREG_QUEUE pRegQueue; if (pWb35Reg->SyncIoPause) goto cleanup; if (pHwData->SurpriseRemove) goto cleanup; // Get the register data and send to USB through Irp 
spin_lock_irq( &pWb35Reg->EP0VM_spin_lock ); pRegQueue = pWb35Reg->pRegFirst; spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); if (!pRegQueue) goto cleanup; // Get an Urb, send it pUrb = (PURB)pRegQueue->pUrb; dr = pRegQueue->pUsbReq; pUrb = pRegQueue->pUrb; pBuffer = pRegQueue->pBuffer; if (pRegQueue->DIRECT == 1) // output pBuffer = &pRegQueue->VALUE; usb_fill_control_urb( pUrb, pHwData->WbUsb.udev, REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue), (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength), Wb35Reg_EP0VM_complete, (void*)pHwData); pWb35Reg->EP0vm_state = VM_RUNNING; ret = wb_usb_submit_urb( pUrb ); if (ret < 0) { #ifdef _PE_REG_DUMP_ WBDEBUG(("EP0 Irp sending error\n")); #endif goto cleanup; } return; cleanup: pWb35Reg->EP0vm_state = VM_STOP; OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount ); } void Wb35Reg_EP0VM_complete(PURB pUrb) { phw_data_t pHwData = (phw_data_t)pUrb->context; PWB35REG pWb35Reg = &pHwData->Wb35Reg; PREG_QUEUE pRegQueue; // Variable setting pWb35Reg->EP0vm_state = VM_COMPLETED; pWb35Reg->EP0VM_status = pUrb->status; if (pHwData->SurpriseRemove) { // Let WbWlanHalt to handle surprise remove pWb35Reg->EP0vm_state = VM_STOP; OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount ); } else { // Complete to send, remove the URB from the first spin_lock_irq( &pWb35Reg->EP0VM_spin_lock ); pRegQueue = pWb35Reg->pRegFirst; if (pRegQueue == pWb35Reg->pRegLast) pWb35Reg->pRegLast = NULL; pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); if (pWb35Reg->EP0VM_status) { #ifdef _PE_REG_DUMP_ WBDEBUG(("EP0 IoCompleteRoutine return error\n")); DebugUsbdStatusInformation( pWb35Reg->EP0VM_status ); #endif pWb35Reg->EP0vm_state = VM_STOP; pHwData->SurpriseRemove = 1; } else { // Success. 
Update the result // Start the next send Wb35Reg_EP0VM(pHwData); } kfree(pRegQueue); } usb_free_urb(pUrb); } void Wb35Reg_destroy(phw_data_t pHwData) { PWB35REG pWb35Reg = &pHwData->Wb35Reg; PURB pUrb; PREG_QUEUE pRegQueue; Uxx_power_off_procedure(pHwData); // Wait for Reg operation completed do { OS_SLEEP(10000); // Delay for waiting function enter 940623.1.a } while (pWb35Reg->EP0vm_state != VM_STOP); OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b // Release all the data in RegQueue spin_lock_irq( &pWb35Reg->EP0VM_spin_lock ); pRegQueue = pWb35Reg->pRegFirst; while (pRegQueue) { if (pRegQueue == pWb35Reg->pRegLast) pWb35Reg->pRegLast = NULL; pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; pUrb = pRegQueue->pUrb; spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); if (pUrb) { usb_free_urb(pUrb); kfree(pRegQueue); } else { #ifdef _PE_REG_DUMP_ WBDEBUG(("EP0 queue release error\n")); #endif } spin_lock_irq( &pWb35Reg->EP0VM_spin_lock ); pRegQueue = pWb35Reg->pRegFirst; } spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock ); } //==================================================================================== // The function can be run in passive-level only. 
//==================================================================================== unsigned char Wb35Reg_initial(phw_data_t pHwData) { PWB35REG pWb35Reg=&pHwData->Wb35Reg; u32 ltmp; u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval; // Spin lock is acquired for read and write IRP command spin_lock_init( &pWb35Reg->EP0VM_spin_lock ); // Getting RF module type from EEPROM ------------------------------------ Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d) Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); //Update RF module type and determine the PHY type by inf or EEPROM pWb35Reg->EEPROMPhyType = (u8)( ltmp & 0xff ); // 0 V MAX2825, 1 V MAX2827, 2 V MAX2828, 3 V MAX2829 // 16V AL2230, 17 - AL7230, 18 - AL2230S // 32 Reserved // 33 - W89RF242(TxVGA 0~19), 34 - W89RF242(TxVGA 0~34) if (pWb35Reg->EEPROMPhyType != RF_DECIDE_BY_INF) { if( (pWb35Reg->EEPROMPhyType == RF_MAXIM_2825) || (pWb35Reg->EEPROMPhyType == RF_MAXIM_2827) || (pWb35Reg->EEPROMPhyType == RF_MAXIM_2828) || (pWb35Reg->EEPROMPhyType == RF_MAXIM_2829) || (pWb35Reg->EEPROMPhyType == RF_MAXIM_V1) || (pWb35Reg->EEPROMPhyType == RF_AIROHA_2230) || (pWb35Reg->EEPROMPhyType == RF_AIROHA_2230S) || (pWb35Reg->EEPROMPhyType == RF_AIROHA_7230) || (pWb35Reg->EEPROMPhyType == RF_WB_242) || (pWb35Reg->EEPROMPhyType == RF_WB_242_1)) pHwData->phy_type = pWb35Reg->EEPROMPhyType; } // Power On procedure running. 
The relative parameter will be set according to phy_type Uxx_power_on_procedure( pHwData ); // Reading MAC address Uxx_ReadEthernetAddress( pHwData ); // Read VCO trim for RF parameter Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08200000 ); Wb35Reg_ReadSync( pHwData, 0x03b4, &VCO_trim ); // Read Antenna On/Off of software flag Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08210000 ); Wb35Reg_ReadSync( pHwData, 0x03b4, &SoftwareSet ); // Read TXVGA Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08100000 ); Wb35Reg_ReadSync( pHwData, 0x03b4, &TxVga ); // Get Scan interval setting from EEPROM offset 0x1c Wb35Reg_WriteSync( pHwData, 0x03b4, 0x081d0000 ); Wb35Reg_ReadSync( pHwData, 0x03b4, &Region_ScanInterval ); // Update Ethernet address memcpy( pHwData->CurrentMacAddress, pHwData->PermanentMacAddress, ETH_LENGTH_OF_ADDRESS ); // Update software variable pHwData->SoftwareSet = (u16)(SoftwareSet & 0xffff); TxVga &= 0x000000ff; pHwData->PowerIndexFromEEPROM = (u8)TxVga; pHwData->VCO_trim = (u8)VCO_trim & 0xff; if (pHwData->VCO_trim == 0xff) pHwData->VCO_trim = 0x28; pWb35Reg->EEPROMRegion = (u8)(Region_ScanInterval>>8); // 20060720 if( pWb35Reg->EEPROMRegion<1 || pWb35Reg->EEPROMRegion>6 ) pWb35Reg->EEPROMRegion = REGION_AUTO; //For Get Tx VGA from EEPROM 20060315.5 move here GetTxVgaFromEEPROM( pHwData ); // Set Scan Interval pHwData->Scan_Interval = (u8)(Region_ScanInterval & 0xff) * 10; if ((pHwData->Scan_Interval == 2550) || (pHwData->Scan_Interval < 10)) // Is default setting 0xff * 10 pHwData->Scan_Interval = SCAN_MAX_CHNL_TIME; // Initial register RFSynthesizer_initial(pHwData); BBProcessor_initial(pHwData); // Async write, must wait until complete Wb35Reg_phy_calibration(pHwData); Mxx_initial(pHwData); Dxx_initial(pHwData); if (pHwData->SurpriseRemove) return FALSE; else return TRUE; // Initial fail } //=================================================================================== // CardComputeCrc -- // // Description: // Runs the AUTODIN II CRC algorithm on buffer Buffer of 
length, Length. // // Arguments: // Buffer - the input buffer // Length - the length of Buffer // // Return Value: // The 32-bit CRC value. // // Note: // This is adapted from the comments in the assembly language // version in _GENREQ.ASM of the DWB NE1000/2000 driver. //================================================================================== u32 CardComputeCrc(u8 * Buffer, u32 Length) { u32 Crc, Carry; u32 i, j; u8 CurByte; Crc = 0xffffffff; for (i = 0; i < Length; i++) { CurByte = Buffer[i]; for (j = 0; j < 8; j++) { Carry = ((Crc & 0x80000000) ? 1 : 0) ^ (CurByte & 0x01); Crc <<= 1; CurByte >>= 1; if (Carry) { Crc =(Crc ^ 0x04c11db6) | Carry; } } } return Crc; } //================================================================== // BitReverse -- // Reverse the bits in the input argument, dwData, which is // regarded as a string of bits with the length, DataLength. // // Arguments: // dwData : // DataLength : // // Return: // The converted value. //================================================================== u32 BitReverse( u32 dwData, u32 DataLength) { u32 HalfLength, i, j; u32 BitA, BitB; if ( DataLength <= 0) return 0; // No conversion is done. dwData = dwData & (0xffffffff >> (32 - DataLength)); HalfLength = DataLength / 2; for ( i = 0, j = DataLength-1 ; i < HalfLength; i++, j--) { BitA = GetBit( dwData, i); BitB = GetBit( dwData, j); if (BitA && !BitB) { dwData = ClearBit( dwData, i); dwData = SetBit( dwData, j); } else if (!BitA && BitB) { dwData = SetBit( dwData, i); dwData = ClearBit( dwData, j); } else { // Do nothing since these two bits are of the save values. 
} } return dwData; } void Wb35Reg_phy_calibration( phw_data_t pHwData ) { u32 BB3c, BB54; if ((pHwData->phy_type == RF_WB_242) || (pHwData->phy_type == RF_WB_242_1)) { phy_calibration_winbond ( pHwData, 2412 ); // Sync operation Wb35Reg_ReadSync( pHwData, 0x103c, &BB3c ); Wb35Reg_ReadSync( pHwData, 0x1054, &BB54 ); pHwData->BB3c_cal = BB3c; pHwData->BB54_cal = BB54; RFSynthesizer_initial(pHwData); BBProcessor_initial(pHwData); // Async operation Wb35Reg_WriteSync( pHwData, 0x103c, BB3c ); Wb35Reg_WriteSync( pHwData, 0x1054, BB54 ); } }
gpl-2.0
iwinoto/v4l-media_build
media/drivers/net/vxlan.c
26
75247
/* * VXLAN: Virtual eXtensible Local Area Network * * Copyright (c) 2012-2013 Vyatta Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/rculist.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/igmp.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/hash.h> #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> #include <net/ip.h> #include <net/ip_tunnels.h> #include <net/icmp.h> #include <net/udp.h> #include <net/rtnetlink.h> #include <net/route.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/vxlan.h> #include <net/protocol.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif #define VXLAN_VERSION "0.1" #define PORT_HASH_BITS 8 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) #define FDB_HASH_BITS 8 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS) #define FDB_AGE_DEFAULT 300 /* 5 min */ #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ /* VXLAN protocol header */ struct vxlanhdr { __be32 vx_flags; __be32 vx_vni; }; /* UDP port for VXLAN traffic. 
* The IANA assigned port is 4789, but the Linux default is 8472 * for compatibility with early adopters. */ static unsigned short vxlan_port __read_mostly = 8472; module_param_named(udp_port, vxlan_port, ushort, 0444); MODULE_PARM_DESC(udp_port, "Destination UDP port"); static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int vxlan_net_id; static const u8 all_zeros_mac[ETH_ALEN]; /* per-network namespace private data for this module */ struct vxlan_net { struct list_head vxlan_list; struct hlist_head sock_list[PORT_HASH_SIZE]; spinlock_t sock_lock; }; union vxlan_addr { struct sockaddr_in sin; struct sockaddr_in6 sin6; struct sockaddr sa; }; struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; u32 remote_vni; u32 remote_ifindex; struct list_head list; struct rcu_head rcu; }; /* Forwarding table entry */ struct vxlan_fdb { struct hlist_node hlist; /* linked list of entries */ struct rcu_head rcu; unsigned long updated; /* jiffies */ unsigned long used; struct list_head remotes; u16 state; /* see ndm_state */ u8 flags; /* see ndm_flags */ u8 eth_addr[ETH_ALEN]; }; /* Pseudo network device */ struct vxlan_dev { struct hlist_node hlist; /* vni hash table */ struct list_head next; /* vxlan's per namespace list */ struct vxlan_sock *vn_sock; /* listening socket */ struct net_device *dev; struct vxlan_rdst default_dst; /* default destination */ union vxlan_addr saddr; /* source address */ __be16 dst_port; __u16 port_min; /* source port range */ __u16 port_max; __u8 tos; /* TOS override */ __u8 ttl; u32 flags; /* VXLAN_F_* below */ struct work_struct sock_work; struct work_struct igmp_join; struct work_struct igmp_leave; unsigned long age_interval; struct timer_list age_timer; spinlock_t hash_lock; unsigned int addrcnt; unsigned int addrmax; struct hlist_head fdb_head[FDB_HASH_SIZE]; }; #define VXLAN_F_LEARN 0x01 #define VXLAN_F_PROXY 0x02 #define 
VXLAN_F_RSC 0x04 #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 #define VXLAN_F_IPV6 0x20 /* internal flag */ /* salt for hash table */ static u32 vxlan_salt __read_mostly; static struct workqueue_struct *vxlan_wq; static void vxlan_sock_work(struct work_struct *work); #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) { if (a->sa.sa_family != b->sa.sa_family) return false; if (a->sa.sa_family == AF_INET6) return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr); else return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; } static inline bool vxlan_addr_any(const union vxlan_addr *ipa) { if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_any(&ipa->sin6.sin6_addr); else return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); } static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) { if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); else return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); } static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla) { if (nla_len(nla) >= sizeof(struct in6_addr)) { nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr)); ip->sa.sa_family = AF_INET6; return 0; } else if (nla_len(nla) >= sizeof(__be32)) { ip->sin.sin_addr.s_addr = nla_get_be32(nla); ip->sa.sa_family = AF_INET; return 0; } else { return -EAFNOSUPPORT; } } static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, const union vxlan_addr *ip) { if (ip->sa.sa_family == AF_INET6) return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr); else return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr); } #else /* !CONFIG_IPV6 */ static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) { return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr; } static inline bool vxlan_addr_any(const union vxlan_addr *ipa) { return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); } static inline 
bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

/* IPv4-only build: an IPv6-sized attribute is rejected as unsupported */
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_be32(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

/* As above, but for callers holding RTNL rather than rcu_read_lock */
static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

/* Find VXLAN socket based on network namespace and UDP port */
static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port)
			return vs;
	}
	return NULL;
}

/* Find the vxlan device bound to VNI @id on socket @vs (RCU walk) */
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, port);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}

/* Fill in neighbour message in skbuff.
*/
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		/* miss notification: suppress attributes that are wildcards */
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	/* only report values that differ from the device defaults */
	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Worst-case size of one FDB netlink message, for nlmsg_new() */
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

/* Broadcast an FDB change (or miss) to RTNLGRP_NEIGH listeners */
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/* Notify userspace of an unresolved L3 address (VXLAN_F_L3MISS) */
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

/* Notify userspace of an unknown destination MAC (VXLAN_F_L2MISS) */
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

/* As __vxlan_find_mac, but also refreshes the entry's "used" timestamp */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev
*vxlan, const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

/* Replace destination of unicast mac
 * Returns 1 if a remote was modified, 0 if nothing changed.
 */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast
 * Returns 1 if a remote was added, 0 if it already existed, <0 on error.
 */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}

/* GRO receive: aggregate VXLAN-encapsulated packets by matching the
 * VXLAN header plus the inner Ethernet header, then hand off to the
 * inner protocol's gro_receive callback.
 */
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	struct ethhdr *eh, *eh2;
	unsigned int hlen, off_vx, off_eth;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}
	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	off_eth = skb_gro_offset(skb);
	hlen = off_eth + sizeof(*eh);
	eh = skb_gro_header_fast(skb, off_eth);
	if (skb_gro_header_hard(skb, hlen)) {
		eh = skb_gro_header_slow(skb, hlen, off_eth);
		if (unlikely(!eh))
			goto out;
	}

	flush = 0;

	/* mark held packets whose VNI or inner MAC header differ */
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		eh2 = (struct ethhdr *)(p->data + off_eth);
		if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype == NULL) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

/* GRO complete: fix up the merged packet via the inner protocol's
 * gro_complete, skipping past the VXLAN + inner Ethernet headers.
 */
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct ethhdr *eh;
	struct packet_offload *ptype;
	__be16 type;
	int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
	int err = -ENOSYS;

	eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
	type = eh->h_proto;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype != NULL)
		err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);

	rcu_read_unlock();
	return err;
}

/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}

/* Notify netdevs that UDP port is no more listening */
static void
vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				int rc = vxlan_fdb_replace(f, ip, port, vni,
							   ifindex);

				if (rc < 0)
					return rc;
				notify |= rc;
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}

/* RCU callback: free an FDB entry and all of its remote destinations */
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

/* Unlink an FDB entry, notify listeners, and free it after grace period */
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		    "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

/* Parse netlink FDB attributes, falling back to device defaults for
 * anything the caller did not supply.
 */
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int
vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip, &src_ip);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
*/
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static void vxlan_sock_hold(struct vxlan_sock *vs)
{
	atomic_inc(&vs->refcnt);
}

/* Drop a reference to the shared socket; on last put, unhash it and
 * defer teardown to the workqueue (del_work).
 */
void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	rcu_assign_sk_user_data(vs->sock->sk, NULL);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);

/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up
 * Done as workqueue because ip_mc_join_group acquires RTNL.
 */
static void vxlan_igmp_join(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	/* drop the references taken when the work was queued */
	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static void vxlan_igmp_leave(struct work_struct *work)
{
	struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
					     &ip->sin6.sin6_addr);
#endif
	}

	release_sock(sk);

	vxlan_sock_release(vs);
	dev_put(vxlan->dev);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN,
htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	/* If the NIC driver gave us an encapsulated packet
	 * with the encapsulation mark, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
	    !skb->encapsulation)
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non vxlan pkt */
	return 1;
}

/* Deliver a decapsulated packet: find the owning device by VNI, learn
 * the source endpoint if enabled, undo outer-header ECN, and pass the
 * inner frame up the stack.
 */
static void vxlan_rcv(struct vxlan_sock *vs,
		      struct sk_buff *skb, __be32 vx_vni)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	__u32 vni;
	int err = 0;
	union vxlan_addr *remote_ip;

	vni = ntohl(vx_vni) >> 8;
	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* ARP proxy (VXLAN_F_PROXY): answer ARP requests locally from the
 * neighbour cache instead of flooding them across the tunnel.
 */
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sa.sa_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request, struct neighbour *n, bool isrouter) { struct net_device *dev = request->dev; struct sk_buff *reply; struct nd_msg *ns, *na; struct ipv6hdr *pip6; u8 *daddr; int na_olen = 8; /* opt hdr + ETH_ALEN for target */ int ns_olen; int i, len; if (dev == NULL) return NULL; len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + sizeof(*na) + na_olen + dev->needed_tailroom; reply = alloc_skb(len, GFP_ATOMIC); if (reply == NULL) return NULL; reply->protocol = htons(ETH_P_IPV6); reply->dev = dev; skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); skb_push(reply, sizeof(struct ethhdr)); skb_set_mac_header(reply, 0); ns = (struct nd_msg *)skb_transport_header(request); daddr = eth_hdr(request)->h_source; ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; } } /* Ethernet header */ ether_addr_copy(eth_hdr(reply)->h_dest, daddr); ether_addr_copy(eth_hdr(reply)->h_source, n->ha); eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); reply->protocol = htons(ETH_P_IPV6); skb_pull(reply, sizeof(struct ethhdr)); skb_set_network_header(reply, 0); skb_put(reply, sizeof(struct ipv6hdr)); /* IPv6 header */ pip6 = ipv6_hdr(reply); memset(pip6, 0, sizeof(struct ipv6hdr)); pip6->version = 6; pip6->priority = ipv6_hdr(request)->priority; pip6->nexthdr = IPPROTO_ICMPV6; pip6->hop_limit = 255; pip6->daddr = ipv6_hdr(request)->saddr; pip6->saddr = *(struct in6_addr *)n->primary_key; skb_pull(reply, sizeof(struct ipv6hdr)); skb_set_transport_header(reply, 0); na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); /* Neighbor Advertisement */ memset(na, 0, sizeof(*na)+na_olen); na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; na->icmph.icmp6_router = isrouter; na->icmph.icmp6_override = 1; na->icmph.icmp6_solicited = 1; na->target = ns->target; 
ether_addr_copy(&na->opt[2], n->ha); na->opt[0] = ND_OPT_TARGET_LL_ADDR; na->opt[1] = na_olen >> 3; na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, csum_partial(na, sizeof(*na)+na_olen, 0)); pip6->payload_len = htons(sizeof(*na)+na_olen); skb_push(reply, sizeof(struct ipv6hdr)); reply->ip_summed = CHECKSUM_UNNECESSARY; return reply; } static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); struct nd_msg *msg; const struct ipv6hdr *iphdr; const struct in6_addr *saddr, *daddr; struct neighbour *n; struct inet6_dev *in6_dev; in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; if (!pskb_may_pull(skb, skb->len)) goto out; iphdr = ipv6_hdr(skb); saddr = &iphdr->saddr; daddr = &iphdr->daddr; msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code != 0 || msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) goto out; if (ipv6_addr_loopback(daddr) || ipv6_addr_is_multicast(&msg->target)) goto out; n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); if (n) { struct vxlan_fdb *f; struct sk_buff *reply; if (!(n->nud_state & NUD_CONNECTED)) { neigh_release(n); goto out; } f = vxlan_find_mac(vxlan, n->ha); if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { /* bridge-local neighbor */ neigh_release(n); goto out; } reply = vxlan_na_create(skb, n, !!(f ? 
f->flags & NTF_ROUTER : 0)); neigh_release(n); if (reply == NULL) goto out; if (netif_rx_ni(reply) == NET_RX_DROP) dev->stats.rx_dropped++; } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin6.sin6_addr = msg->target, .sa.sa_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); } out: consume_skb(skb); return NETDEV_TX_OK; } #endif static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); struct neighbour *n; if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) return false; n = NULL; switch (ntohs(eth_hdr(skb)->h_proto)) { case ETH_P_IP: { struct iphdr *pip; if (!pskb_may_pull(skb, sizeof(struct iphdr))) return false; pip = ip_hdr(skb); n = neigh_lookup(&arp_tbl, &pip->daddr, dev); if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = pip->daddr, .sa.sa_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); return false; } break; } #if IS_ENABLED(CONFIG_IPV6) case ETH_P_IPV6: { struct ipv6hdr *pip6; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return false; pip6 = ipv6_hdr(skb); n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin6.sin6_addr = pip6->daddr, .sa.sa_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); return false; } break; } #endif default: return false; } if (n) { bool diff; diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); if (diff) { memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, dev->addr_len); memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); } neigh_release(n); return diff; } return false; } /* Compute source port for outgoing packet * first choice to use L4 flow hash since it will spread * better and maybe available from hardware * secondary choice is to use jhash on the Ethernet header */ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb) { unsigned int range = (port_max - port_min) + 1; u32 hash; hash = skb_get_hash(skb); 
if (!hash) hash = jhash(skb->data, 2 * ETH_ALEN, (__force u32) skb->protocol); return htons((((u64) hash * range) >> 32) + port_min); } EXPORT_SYMBOL_GPL(vxlan_src_port); static int handle_offloads(struct sk_buff *skb) { if (skb_is_gso(skb)) { int err = skb_unclone(skb, GFP_ATOMIC); if (unlikely(err)) return err; skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; } else if (skb->ip_summed != CHECKSUM_PARTIAL) skb->ip_summed = CHECKSUM_NONE; return 0; } #if IS_ENABLED(CONFIG_IPV6) static int vxlan6_xmit_skb(struct vxlan_sock *vs, struct dst_entry *dst, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port, __be32 vni) { struct ipv6hdr *ip6h; struct vxlanhdr *vxh; struct udphdr *uh; int min_headroom; int err; if (!skb->encapsulation) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } skb_scrub_packet(skb, false); min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + VXLAN_HLEN + sizeof(struct ipv6hdr) + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); if (unlikely(err)) return err; if (vlan_tx_tag_present(skb)) { if (WARN_ON(!__vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)))) return -ENOMEM; skb->vlan_tci = 0; } vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; __skb_push(skb, sizeof(*uh)); skb_reset_transport_header(skb); uh = udp_hdr(skb); uh->dest = dst_port; uh->source = src_port; uh->len = htons(skb->len); uh->check = 0; memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_set(skb, dst); if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) { __wsum csum = skb_checksum(skb, 0, skb->len, 0); skb->ip_summed = CHECKSUM_UNNECESSARY; uh->check = csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } else { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_UDP, 0); } __skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); ip6h->version = 6; ip6h->priority = prio; ip6h->flow_lbl[0] = 0; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; ip6h->payload_len = htons(skb->len); ip6h->nexthdr = IPPROTO_UDP; ip6h->hop_limit = ttl; ip6h->daddr = *daddr; ip6h->saddr = *saddr; err = handle_offloads(skb); if (err) return err; ip6tunnel_xmit(skb, dev); return 0; } #endif int vxlan_xmit_skb(struct vxlan_sock *vs, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni) { struct vxlanhdr *vxh; struct udphdr *uh; int min_headroom; int err; if (!skb->encapsulation) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } 
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + VXLAN_HLEN + sizeof(struct iphdr) + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ err = skb_cow_head(skb, min_headroom); if (unlikely(err)) return err; if (vlan_tx_tag_present(skb)) { if (WARN_ON(!__vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)))) return -ENOMEM; skb->vlan_tci = 0; } vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; __skb_push(skb, sizeof(*uh)); skb_reset_transport_header(skb); uh = udp_hdr(skb); uh->dest = dst_port; uh->source = src_port; uh->len = htons(skb->len); uh->check = 0; err = handle_offloads(skb); if (err) return err; return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, false); } EXPORT_SYMBOL_GPL(vxlan_xmit_skb); /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan) { struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; skb->dev = dst_vxlan->dev; __skb_pull(skb, skb_network_offset(skb)); if (remote_ip->sa.sa_family == AF_INET) { loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); loopback.sa.sa_family = AF_INET; #if IS_ENABLED(CONFIG_IPV6) } else { loopback.sin6.sin6_addr = in6addr_loopback; loopback.sa.sa_family = AF_INET6; #endif } if (dst_vxlan->flags & VXLAN_F_LEARN) vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; tx_stats->tx_bytes += skb->len; u64_stats_update_end(&tx_stats->syncp); if (netif_rx(skb) == NET_RX_SUCCESS) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; 
rx_stats->rx_bytes += skb->len; u64_stats_update_end(&rx_stats->syncp); } else { skb->dev->stats.rx_dropped++; } } static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { struct vxlan_dev *vxlan = netdev_priv(dev); struct rtable *rt = NULL; const struct iphdr *old_iph; struct flowi4 fl4; union vxlan_addr *dst; __be16 src_port = 0, dst_port; u32 vni; __be16 df = 0; __u8 tos, ttl; int err; dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; if (vxlan_addr_any(dst)) { if (did_rsc) { /* short-circuited back to local bridge */ vxlan_encap_bypass(skb, vxlan, vxlan); return; } goto drop; } old_iph = ip_hdr(skb); ttl = vxlan->ttl; if (!ttl && vxlan_addr_multicast(dst)) ttl = 1; tos = vxlan->tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb); if (dst->sa.sa_family == AF_INET) { memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = rdst->remote_ifindex; fl4.flowi4_tos = RT_TOS(tos); fl4.daddr = dst->sin.sin_addr.s_addr; fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; rt = ip_route_output_key(dev_net(dev), &fl4); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr); dev->stats.tx_carrier_errors++; goto tx_error; } if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to %pI4\n", &dst->sin.sin_addr.s_addr); dev->stats.collisions++; goto rt_tx_error; } /* Bypass encapsulation if the destination is local */ if (rt->rt_flags & RTCF_LOCAL && !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; ip_rt_put(rt); dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); return; } tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, fl4.saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, htonl(vni << 8)); if (err < 0) goto rt_tx_error; iptunnel_xmit_stats(err, &dev->stats, dev->tstats); #if IS_ENABLED(CONFIG_IPV6) } else { struct sock *sk = vxlan->vn_sock->sock->sk; struct dst_entry *ndst; struct flowi6 fl6; u32 flags; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = rdst->remote_ifindex; fl6.daddr = dst->sin6.sin6_addr; fl6.saddr = vxlan->saddr.sin6.sin6_addr; fl6.flowi6_proto = IPPROTO_UDP; if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); dev->stats.tx_carrier_errors++; goto tx_error; } if (ndst->dev == dev) { netdev_dbg(dev, "circular route to %pI6\n", &dst->sin6.sin6_addr); dst_release(ndst); dev->stats.collisions++; goto tx_error; } /* Bypass encapsulation if the destination is local */ flags = ((struct rt6_info *)ndst)->rt6i_flags; if (flags & RTF_LOCAL && !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; dst_release(ndst); dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); return; } ttl = ttl ? : ip6_dst_hoplimit(ndst); err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, dev, &fl6.saddr, &fl6.daddr, 0, ttl, src_port, dst_port, htonl(vni << 8)); #endif } return; drop: dev->stats.tx_dropped++; goto tx_free; rt_tx_error: ip_rt_put(rt); tx_error: dev->stats.tx_errors++; tx_free: dev_kfree_skb(skb); } /* Transmit local packets over Vxlan * * Outer IP header inherits ECN and DF from inner header. * Outer UDP destination is the VXLAN assigned port. 
* source port is based on hash of flow */ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); struct ethhdr *eth; bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; skb_reset_mac_header(skb); eth = eth_hdr(skb); if ((vxlan->flags & VXLAN_F_PROXY)) { if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(eth->h_proto) == ETH_P_IPV6 && skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { struct nd_msg *msg; msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code == 0 && msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb); } #endif } f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) && (ntohs(eth->h_proto) == ETH_P_IP || ntohs(eth->h_proto) == ETH_P_IPV6)) { did_rsc = route_shortcircuit(dev, skb); if (did_rsc) f = vxlan_find_mac(vxlan, eth->h_dest); } if (f == NULL) { f = vxlan_find_mac(vxlan, all_zeros_mac); if (f == NULL) { if ((vxlan->flags & VXLAN_F_L2MISS) && !is_multicast_ether_addr(eth->h_dest)) vxlan_fdb_miss(vxlan, eth->h_dest); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } } list_for_each_entry_rcu(rdst, &f->remotes, list) { struct sk_buff *skb1; if (!fdst) { fdst = rdst; continue; } skb1 = skb_clone(skb, GFP_ATOMIC); if (skb1) vxlan_xmit_one(skb1, dev, rdst, did_rsc); } if (fdst) vxlan_xmit_one(skb, dev, fdst, did_rsc); else kfree_skb(skb); return NETDEV_TX_OK; } /* Walk the forwarding table and purge stale entries */ static void vxlan_cleanup(unsigned long arg) { struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; unsigned int h; if (!netif_running(vxlan->dev)) return; spin_lock_bh(&vxlan->hash_lock); for (h = 0; h < FDB_HASH_SIZE; ++h) { struct hlist_node 
*p, *n; hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; if (f->state & NUD_PERMANENT) continue; timeout = f->used + vxlan->age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; vxlan_fdb_destroy(vxlan, f); } else if (time_before(timeout, next_timer)) next_timer = timeout; } } spin_unlock_bh(&vxlan->hash_lock); mod_timer(&vxlan->age_timer, next_timer); } static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) { __u32 vni = vxlan->default_dst.remote_vni; vxlan->vn_sock = vs; hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); } /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_sock *vs; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; spin_lock(&vn->sock_lock); vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); if (vs) { /* If we have a socket with same port already, reuse it */ atomic_inc(&vs->refcnt); vxlan_vs_add_dev(vs, vxlan); } else { /* otherwise make new socket outside of RTNL */ dev_hold(dev); queue_work(vxlan_wq, &vxlan->sock_work); } spin_unlock(&vn->sock_lock); return 0; } static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) { struct vxlan_fdb *f; spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac); if (f) vxlan_fdb_destroy(vxlan, f); spin_unlock_bh(&vxlan->hash_lock); } static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_sock *vs = vxlan->vn_sock; vxlan_fdb_delete_default(vxlan); if (vs) vxlan_sock_release(vs); free_percpu(dev->tstats); } /* Start ageing timer and join group when device is brought up */ static int vxlan_open(struct net_device *dev) { struct 
vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_sock *vs = vxlan->vn_sock; /* socket hasn't been created */ if (!vs) return -ENOTCONN; if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_join); } if (vxlan->age_interval) mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); return 0; } /* Purge the forwarding table */ static void vxlan_flush(struct vxlan_dev *vxlan) { unsigned int h; spin_lock_bh(&vxlan->hash_lock); for (h = 0; h < FDB_HASH_SIZE; ++h) { struct hlist_node *p, *n; hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) vxlan_fdb_destroy(vxlan, f); } } spin_unlock_bh(&vxlan->hash_lock); } /* Cleanup timer and forwarding table on shutdown */ static int vxlan_stop(struct net_device *dev) { struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_sock *vs = vxlan->vn_sock; if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && !vxlan_group_used(vn, vxlan)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_leave); } del_timer_sync(&vxlan->age_timer); vxlan_flush(vxlan); return 0; } /* Stub, nothing needs to be done. 
*/ static void vxlan_set_multicast_list(struct net_device *dev) { } static int vxlan_change_mtu(struct net_device *dev, int new_mtu) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; struct net_device *lowerdev; int max_mtu; lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex); if (lowerdev == NULL) return eth_change_mtu(dev, new_mtu); if (dst->remote_ip.sa.sa_family == AF_INET6) max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; else max_mtu = lowerdev->mtu - VXLAN_HEADROOM; if (new_mtu < 68 || new_mtu > max_mtu) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops vxlan_netdev_ops = { .ndo_init = vxlan_init, .ndo_uninit = vxlan_uninit, .ndo_open = vxlan_open, .ndo_stop = vxlan_stop, .ndo_start_xmit = vxlan_xmit, .ndo_get_stats64 = ip_tunnel_get_stats64, .ndo_set_rx_mode = vxlan_set_multicast_list, .ndo_change_mtu = vxlan_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, .ndo_fdb_dump = vxlan_fdb_dump, }; /* Info for udev, that this is a virtual tunnel endpoint */ static struct device_type vxlan_type = { .name = "vxlan", }; /* Calls the ndo_add_vxlan_port of the caller in order to * supply the listening VXLAN udp ports. Callers are expected * to implement the ndo_add_vxlan_port. */ void vxlan_get_rx_port(struct net_device *dev) { struct vxlan_sock *vs; struct net *net = dev_net(dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); sa_family_t sa_family; __be16 port; unsigned int i; spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { port = inet_sk(vs->sock->sk)->inet_sport; sa_family = vs->sock->sk->sk_family; dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, port); } } spin_unlock(&vn->sock_lock); } EXPORT_SYMBOL_GPL(vxlan_get_rx_port); /* Initialize the device structure. 
*/ static void vxlan_setup(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); unsigned int h; int low, high; eth_hw_addr_random(dev); ether_setup(dev); if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM; else dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &vxlan_type); dev->tx_queue_len = 0; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; dev->vlan_features = dev->features; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join); INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave); INIT_WORK(&vxlan->sock_work, vxlan_sock_work); init_timer_deferrable(&vxlan->age_timer); vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.data = (unsigned long) vxlan; inet_get_local_port_range(dev_net(dev), &low, &high); vxlan->port_min = low; vxlan->port_max = high; vxlan->dst_port = htons(vxlan_port); vxlan->dev = dev; for (h = 0; h < FDB_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vxlan->fdb_head[h]); } static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_ID] = { .type = NLA_U32 }, [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 
[IFLA_VXLAN_TOS] = { .type = NLA_U8 }, [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { pr_debug("invalid link address (not ethernet)\n"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { pr_debug("invalid all zero ethernet address\n"); return -EADDRNOTAVAIL; } } if (!data) return -EINVAL; if (data[IFLA_VXLAN_ID]) { __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); if (id >= VXLAN_VID_MASK) return -ERANGE; } if (data[IFLA_VXLAN_PORT_RANGE]) { const struct ifla_vxlan_port_range *p = nla_data(data[IFLA_VXLAN_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { pr_debug("port range %u .. %u not valid\n", ntohs(p->low), ntohs(p->high)); return -EINVAL; } } return 0; } static void vxlan_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); } static const struct ethtool_ops vxlan_ethtool_ops = { .get_drvinfo = vxlan_get_drvinfo, .get_link = ethtool_op_get_link, }; static void vxlan_del_work(struct work_struct *work) { struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); sk_release_kernel(vs->sock->sk); kfree_rcu(vs, rcu); } #if IS_ENABLED(CONFIG_IPV6) /* Create UDP socket for encapsulation receive. AF_INET6 socket * could be used for both IPv4 and IPv6 communications, but * users may set bindv6only=1. 
*/ static struct socket *create_v6_sock(struct net *net, __be16 port) { struct sock *sk; struct socket *sock; struct sockaddr_in6 vxlan_addr = { .sin6_family = AF_INET6, .sin6_port = port, }; int rc, val = 1; rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock); if (rc < 0) { pr_debug("UDPv6 socket create failed\n"); return ERR_PTR(rc); } /* Put in proper namespace */ sk = sock->sk; sk_change_net(sk, net); kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY, (char *)&val, sizeof(val)); rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr, sizeof(struct sockaddr_in6)); if (rc < 0) { pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n", &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc); sk_release_kernel(sk); return ERR_PTR(rc); } /* At this point, IPv6 module should have been loaded in * sock_create_kern(). */ BUG_ON(!ipv6_stub); /* Disable multicast loopback */ inet_sk(sk)->mc_loop = 0; return sock; } #else static struct socket *create_v6_sock(struct net *net, __be16 port) { return ERR_PTR(-EPFNOSUPPORT); } #endif static struct socket *create_v4_sock(struct net *net, __be16 port) { struct sock *sk; struct socket *sock; struct sockaddr_in vxlan_addr = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = port, }; int rc; /* Create UDP socket for encapsulation receive. 
*/ rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (rc < 0) { pr_debug("UDP socket create failed\n"); return ERR_PTR(rc); } /* Put in proper namespace */ sk = sock->sk; sk_change_net(sk, net); rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr, sizeof(vxlan_addr)); if (rc < 0) { pr_debug("bind for UDP socket %pI4:%u (%d)\n", &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); sk_release_kernel(sk); return ERR_PTR(rc); } /* Disable multicast loopback */ inet_sk(sk)->mc_loop = 0; return sock; } /* Create new listen socket if needed */ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, vxlan_rcv_t *rcv, void *data, bool ipv6) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; struct socket *sock; struct sock *sk; unsigned int h; vs = kzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) return ERR_PTR(-ENOMEM); for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vs->vni_list[h]); INIT_WORK(&vs->del_work, vxlan_del_work); if (ipv6) sock = create_v6_sock(net, port); else sock = create_v4_sock(net, port); if (IS_ERR(sock)) { kfree(vs); return ERR_CAST(sock); } vs->sock = sock; sk = sock->sk; atomic_set(&vs->refcnt, 1); vs->rcv = rcv; vs->data = data; rcu_assign_sk_user_data(vs->sock->sk, vs); /* Initialize the vxlan udp offloads structure */ vs->udp_offloads.port = port; vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive; vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete; spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); vxlan_notify_add_rx_port(vs); spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. 
*/ udp_sk(sk)->encap_type = 1; udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) ipv6_stub->udpv6_encap_enable(); else #endif udp_encap_enable(); return vs; } struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, vxlan_rcv_t *rcv, void *data, bool no_share, bool ipv6) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; vs = vxlan_socket_create(net, port, rcv, data, ipv6); if (!IS_ERR(vs)) return vs; if (no_share) /* Return error if sharing is not allowed. */ return vs; spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, port); if (vs) { if (vs->rcv == rcv) atomic_inc(&vs->refcnt); else vs = ERR_PTR(-EBUSY); } spin_unlock(&vn->sock_lock); if (!vs) vs = ERR_PTR(-EINVAL); return vs; } EXPORT_SYMBOL_GPL(vxlan_sock_add); /* Scheduled at device creation to bind to a socket */ static void vxlan_sock_work(struct work_struct *work) { struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); struct net *net = dev_net(vxlan->dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); __be16 port = vxlan->dst_port; struct vxlan_sock *nvs; nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6); spin_lock(&vn->sock_lock); if (!IS_ERR(nvs)) vxlan_vs_add_dev(nvs, vxlan); spin_unlock(&vn->sock_lock); dev_put(vxlan->dev); } static int vxlan_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; __u32 vni; int err; bool use_ipv6 = false; if (!data[IFLA_VXLAN_ID]) return -EINVAL; vni = nla_get_u32(data[IFLA_VXLAN_ID]); dst->remote_vni = vni; /* Unless IPv6 is explicitly requested, assume IPv4 */ dst->remote_ip.sa.sa_family = AF_INET; if (data[IFLA_VXLAN_GROUP]) { dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]); } else if (data[IFLA_VXLAN_GROUP6]) { if 
(!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6], sizeof(struct in6_addr)); dst->remote_ip.sa.sa_family = AF_INET6; use_ipv6 = true; } if (data[IFLA_VXLAN_LOCAL]) { vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); vxlan->saddr.sa.sa_family = AF_INET; } else if (data[IFLA_VXLAN_LOCAL6]) { if (!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; /* TODO: respect scope id */ nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6], sizeof(struct in6_addr)); vxlan->saddr.sa.sa_family = AF_INET6; use_ipv6 = true; } if (data[IFLA_VXLAN_LINK] && (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { struct net_device *lowerdev = __dev_get_by_index(net, dst->remote_ifindex); if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); return -ENODEV; } #if IS_ENABLED(CONFIG_IPV6) if (use_ipv6) { struct inet6_dev *idev = __in6_dev_get(lowerdev); if (idev && idev->cnf.disable_ipv6) { pr_info("IPv6 is disabled via sysctl\n"); return -EPERM; } vxlan->flags |= VXLAN_F_IPV6; } #endif if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); /* update header length based on lower device */ dev->hard_header_len = lowerdev->hard_header_len + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); } else if (use_ipv6) vxlan->flags |= VXLAN_F_IPV6; if (data[IFLA_VXLAN_TOS]) vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); if (data[IFLA_VXLAN_TTL]) vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) vxlan->flags |= VXLAN_F_LEARN; if (data[IFLA_VXLAN_AGEING]) vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); else vxlan->age_interval = FDB_AGE_DEFAULT; if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) vxlan->flags |= VXLAN_F_PROXY; if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) vxlan->flags |= VXLAN_F_RSC; if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) vxlan->flags |= VXLAN_F_L2MISS; if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) vxlan->flags |= VXLAN_F_L3MISS; if (data[IFLA_VXLAN_LIMIT]) vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); if (data[IFLA_VXLAN_PORT_RANGE]) { const struct ifla_vxlan_port_range *p = nla_data(data[IFLA_VXLAN_PORT_RANGE]); vxlan->port_min = ntohs(p->low); vxlan->port_max = ntohs(p->high); } if (data[IFLA_VXLAN_PORT]) vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); if (vxlan_find_vni(net, vni, vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; } SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); /* create an fdb entry for a valid default destination */ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE|NUD_PERMANENT, NLM_F_EXCL|NLM_F_CREATE, vxlan->dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, NTF_SELF); if (err) return err; } err = register_netdevice(dev); if (err) { vxlan_fdb_delete_default(vxlan); return err; } list_add(&vxlan->next, &vn->vxlan_list); return 0; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) { struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct 
vxlan_dev *vxlan = netdev_priv(dev); spin_lock(&vn->sock_lock); if (!hlist_unhashed(&vxlan->hlist)) hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); } static size_t vxlan_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ nla_total_size(sizeof(struct ifla_vxlan_port_range)) + nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */ 0; } static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { const struct vxlan_dev *vxlan = netdev_priv(dev); const struct vxlan_rdst *dst = &vxlan->default_dst; struct ifla_vxlan_port_range ports = { .low = htons(vxlan->port_min), .high = htons(vxlan->port_max), }; if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) goto nla_put_failure; if (!vxlan_addr_any(&dst->remote_ip)) { if (dst->remote_ip.sa.sa_family == AF_INET) { if (nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip.sin.sin_addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else { if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr), &dst->remote_ip.sin6.sin6_addr)) goto nla_put_failure; #endif } } if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) goto nla_put_failure; if (!vxlan_addr_any(&vxlan->saddr)) { if 
(vxlan->saddr.sa.sa_family == AF_INET) { if (nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr.sin.sin_addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else { if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr), &vxlan->saddr.sin6.sin6_addr)) goto nla_put_failure; #endif } } if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->flags & VXLAN_F_LEARN)) || nla_put_u8(skb, IFLA_VXLAN_PROXY, !!(vxlan->flags & VXLAN_F_PROXY)) || nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) || nla_put_u8(skb, IFLA_VXLAN_L2MISS, !!(vxlan->flags & VXLAN_F_L2MISS)) || nla_put_u8(skb, IFLA_VXLAN_L3MISS, !!(vxlan->flags & VXLAN_F_L3MISS)) || nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port)) goto nla_put_failure; if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops vxlan_link_ops __read_mostly = { .kind = "vxlan", .maxtype = IFLA_VXLAN_MAX, .policy = vxlan_policy, .priv_size = sizeof(struct vxlan_dev), .setup = vxlan_setup, .validate = vxlan_validate, .newlink = vxlan_newlink, .dellink = vxlan_dellink, .get_size = vxlan_get_size, .fill_info = vxlan_fill_info, }; static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, struct net_device *dev) { struct vxlan_dev *vxlan, *next; LIST_HEAD(list_kill); list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) { struct vxlan_rdst *dst = &vxlan->default_dst; /* In case we created vxlan device with carrier * and we loose the carrier due to module unload * we also need to remove vxlan device. In other * cases, it's not necessary and remote_ifindex * is 0 here, so no matches. 
*/ if (dst->remote_ifindex == dev->ifindex) vxlan_dellink(vxlan->dev, &list_kill); } unregister_netdevice_many(&list_kill); } static int vxlan_lowerdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); return NOTIFY_DONE; } static struct notifier_block vxlan_notifier_block __read_mostly = { .notifier_call = vxlan_lowerdev_event, }; static __net_init int vxlan_init_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); unsigned int h; INIT_LIST_HEAD(&vn->vxlan_list); spin_lock_init(&vn->sock_lock); for (h = 0; h < PORT_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vn->sock_list[h]); return 0; } static struct pernet_operations vxlan_net_ops = { .init = vxlan_init_net, .id = &vxlan_net_id, .size = sizeof(struct vxlan_net), }; static int __init vxlan_init_module(void) { int rc; vxlan_wq = alloc_workqueue("vxlan", 0, 0); if (!vxlan_wq) return -ENOMEM; get_random_bytes(&vxlan_salt, sizeof(vxlan_salt)); rc = register_pernet_subsys(&vxlan_net_ops); if (rc) goto out1; rc = register_netdevice_notifier(&vxlan_notifier_block); if (rc) goto out2; rc = rtnl_link_register(&vxlan_link_ops); if (rc) goto out3; return 0; out3: unregister_netdevice_notifier(&vxlan_notifier_block); out2: unregister_pernet_subsys(&vxlan_net_ops); out1: destroy_workqueue(vxlan_wq); return rc; } late_initcall(vxlan_init_module); static void __exit vxlan_cleanup_module(void) { rtnl_link_unregister(&vxlan_link_ops); unregister_netdevice_notifier(&vxlan_notifier_block); destroy_workqueue(vxlan_wq); unregister_pernet_subsys(&vxlan_net_ops); /* rcu_barrier() is called by netns */ } module_exit(vxlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(VXLAN_VERSION); MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>"); MODULE_DESCRIPTION("Driver for VXLAN encapsulated 
traffic"); MODULE_ALIAS_RTNL_LINK("vxlan");
gpl-2.0
fedya/aircam-openwrt
build_dir/linux-gm812x/linux-2.6.28.fa2/drivers/usb/wusbcore/crypto.c
26
17127
/* * Ultra Wide Band * AES-128 CCM Encryption * * Copyright (C) 2007 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * We don't do any encryption here; we use the Linux Kernel's AES-128 * crypto modules to construct keys and payload blocks in a way * defined by WUSB1.0[6]. Check the erratas, as typos are are patched * there. * * Thanks a zillion to John Keys for his help and clarifications over * the designed-by-a-committee text. * * So the idea is that there is this basic Pseudo-Random-Function * defined in WUSB1.0[6.5] which is the core of everything. It works * by tweaking some blocks, AES crypting them and then xoring * something else with them (this seems to be called CBC(AES) -- can * you tell I know jack about crypto?). So we just funnel it into the * Linux Crypto API. * * We leave a crypto test module so we can verify that vectors match, * every now and then. * * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I * am learning a lot... * * Conveniently, some data structures that need to be * funneled through AES are...16 bytes in size! 
*/ #include <linux/crypto.h> #include <linux/module.h> #include <linux/err.h> #include <linux/uwb.h> #include <linux/usb/wusb.h> #include <linux/scatterlist.h> #define D_LOCAL 0 #include <linux/uwb/debug.h> /* * Block of data, as understood by AES-CCM * * The code assumes this structure is nothing but a 16 byte array * (packed in a struct to avoid common mess ups that I usually do with * arrays and enforcing type checking). */ struct aes_ccm_block { u8 data[16]; } __attribute__((packed)); /* * Counter-mode Blocks (WUSB1.0[6.4]) * * According to CCM (or so it seems), for the purpose of calculating * the MIC, the message is broken in N counter-mode blocks, B0, B1, * ... BN. * * B0 contains flags, the CCM nonce and l(m). * * B1 contains l(a), the MAC header, the encryption offset and padding. * * If EO is nonzero, additional blocks are built from payload bytes * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The * padding is not xmitted. */ /* WUSB1.0[T6.4] */ struct aes_ccm_b0 { u8 flags; /* 0x59, per CCM spec */ struct aes_ccm_nonce ccm_nonce; __be16 lm; } __attribute__((packed)); /* WUSB1.0[T6.5] */ struct aes_ccm_b1 { __be16 la; u8 mac_header[10]; __le16 eo; u8 security_reserved; /* This is always zero */ u8 padding; /* 0 */ } __attribute__((packed)); /* * Encryption Blocks (WUSB1.0[6.4.4]) * * CCM uses Ax blocks to generate a keystream with which the MIC and * the message's payload are encoded. A0 always encrypts/decrypts the * MIC. Ax (x>0) are used for the sucesive payload blocks. * * The x is the counter, and is increased for each block. 
*/ struct aes_ccm_a { u8 flags; /* 0x01, per CCM spec */ struct aes_ccm_nonce ccm_nonce; __be16 counter; /* Value of x */ } __attribute__((packed)); static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2, size_t size) { u8 *bo = _bo; const u8 *bi1 = _bi1, *bi2 = _bi2; size_t itr; for (itr = 0; itr < size; itr++) bo[itr] = bi1[itr] ^ bi2[itr]; } /* * CC-MAC function WUSB1.0[6.5] * * Take a data string and produce the encrypted CBC Counter-mode MIC * * Note the names for most function arguments are made to (more or * less) match those used in the pseudo-function definition given in * WUSB1.0[6.5]. * * @tfm_cbc: CBC(AES) blkcipher handle (initialized) * * @tfm_aes: AES cipher handle (initialized) * * @mic: buffer for placing the computed MIC (Message Integrity * Code). This is exactly 8 bytes, and we expect the buffer to * be at least eight bytes in length. * * @key: 128 bit symmetric key * * @n: CCM nonce * * @a: ASCII string, 14 bytes long (I guess zero padded if needed; * we use exactly 14 bytes). * * @b: data stream to be processed; cannot be a global or const local * (will confuse the scatterlists) * * @blen: size of b... * * Still not very clear how this is done, but looks like this: we * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we * take the payload and divide it in blocks (16 bytes), xor them with * the previous crypto result (16 bytes) and crypt it, repeat the next * block with the output of the previous one, rinse wash (I guess this * is what AES CBC mode means...but I truly have no idea). So we use * the CBC(AES) blkcipher, that does precisely that. The IV (Initial * Vector) is 16 bytes and is set to zero, so * * See rfc3610. Linux crypto has a CBC implementation, but the * documentation is scarce, to say the least, and the example code is * so intricated that is difficult to understand how things work. Most * of this is guess work -- bite me. 
* * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and * using the 14 bytes of @a to fill up * b1.{mac_header,e0,security_reserved,padding}. * * NOTE: The definiton of l(a) in WUSB1.0[6.5] vs the definition of * l(m) is orthogonal, they bear no relationship, so it is not * in conflict with the parameter's relation that * WUSB1.0[6.4.2]) defines. * * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in * first errata released on 2005/07. * * NOTE: we need to clean IV to zero at each invocation to make sure * we start with a fresh empty Initial Vector, so that the CBC * works ok. * * NOTE: blen is not aligned to a block size, we'll pad zeros, that's * what sg[4] is for. Maybe there is a smarter way to do this. */ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, struct crypto_cipher *tfm_aes, void *mic, const struct aes_ccm_nonce *n, const struct aes_ccm_label *a, const void *b, size_t blen) { int result = 0; struct blkcipher_desc desc; struct aes_ccm_b0 b0; struct aes_ccm_b1 b1; struct aes_ccm_a ax; struct scatterlist sg[4], sg_dst; void *iv, *dst_buf; size_t ivsize, dst_size; const u8 bzero[16] = { 0 }; size_t zero_padding; d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " "n %p, a %p, b %p, blen %zu)\n", tfm_cbc, tfm_aes, mic, n, a, b, blen); /* * These checks should be compile time optimized out * ensure @a fills b1's mac_header and following fields */ WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la)); WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block)); WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block)); WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block)); result = -ENOMEM; zero_padding = sizeof(struct aes_ccm_block) - blen % sizeof(struct aes_ccm_block); zero_padding = blen % sizeof(struct aes_ccm_block); if (zero_padding) zero_padding = sizeof(struct aes_ccm_block) - zero_padding; dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding; dst_buf = kzalloc(dst_size, GFP_KERNEL); if (dst_buf == NULL) { printk(KERN_ERR "E: 
can't alloc destination buffer\n"); goto error_dst_buf; } iv = crypto_blkcipher_crt(tfm_cbc)->iv; ivsize = crypto_blkcipher_ivsize(tfm_cbc); memset(iv, 0, ivsize); /* Setup B0 */ b0.flags = 0x59; /* Format B0 */ b0.ccm_nonce = *n; b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ /* Setup B1 * * The WUSB spec is anything but clear! WUSB1.0[6.5] * says that to initialize B1 from A with 'l(a) = blen + * 14'--after clarification, it means to use A's contents * for MAC Header, EO, sec reserved and padding. */ b1.la = cpu_to_be16(blen + 14); memcpy(&b1.mac_header, a, sizeof(*a)); d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0)); d_dump(4, NULL, &b0, sizeof(b0)); d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1)); d_dump(4, NULL, &b1, sizeof(b1)); d_printf(4, NULL, "I: B (%zu bytes)\n", blen); d_dump(4, NULL, b, blen); d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding); d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize); d_dump(4, NULL, iv, ivsize); sg_init_table(sg, ARRAY_SIZE(sg)); sg_set_buf(&sg[0], &b0, sizeof(b0)); sg_set_buf(&sg[1], &b1, sizeof(b1)); sg_set_buf(&sg[2], b, blen); /* 0 if well behaved :) */ sg_set_buf(&sg[3], bzero, zero_padding); sg_init_one(&sg_dst, dst_buf, dst_size); desc.tfm = tfm_cbc; desc.flags = 0; result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size); if (result < 0) { printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n", result); goto error_cbc_crypt; } d_printf(4, NULL, "D: MIC tag\n"); d_dump(4, NULL, iv, ivsize); /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] * The procedure is to AES crypt the A0 block and XOR the MIC * Tag agains it; we only do the first 8 bytes and place it * directly in the destination buffer. * * POS Crypto API: size is assumed to be AES's block size. 
* Thanks for documenting it -- tip taken from airo.c */ ax.flags = 0x01; /* as per WUSB 1.0 spec */ ax.ccm_nonce = *n; ax.counter = 0; crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); bytewise_xor(mic, &ax, iv, 8); d_printf(4, NULL, "D: CTR[MIC]\n"); d_dump(4, NULL, &ax, 8); d_printf(4, NULL, "D: CCM-MIC tag\n"); d_dump(4, NULL, mic, 8); result = 8; error_cbc_crypt: kfree(dst_buf); error_dst_buf: d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " "n %p, a %p, b %p, blen %zu)\n", tfm_cbc, tfm_aes, mic, n, a, b, blen); return result; } /* * WUSB Pseudo Random Function (WUSB1.0[6.5]) * * @b: buffer to the source data; cannot be a global or const local * (will confuse the scatterlists) */ ssize_t wusb_prf(void *out, size_t out_size, const u8 key[16], const struct aes_ccm_nonce *_n, const struct aes_ccm_label *a, const void *b, size_t blen, size_t len) { ssize_t result, bytes = 0, bitr; struct aes_ccm_nonce n = *_n; struct crypto_blkcipher *tfm_cbc; struct crypto_cipher *tfm_aes; u64 sfn = 0; __le64 sfn_le; d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " "a %p, b %p, blen %zu, len %zu)\n", out, out_size, key, _n, a, b, blen, len); tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_cbc)) { result = PTR_ERR(tfm_cbc); printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result); goto error_alloc_cbc; } result = crypto_blkcipher_setkey(tfm_cbc, key, 16); if (result < 0) { printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result); goto error_setkey_cbc; } tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_aes)) { result = PTR_ERR(tfm_aes); printk(KERN_ERR "E: can't load AES: %d\n", (int)result); goto error_alloc_aes; } result = crypto_cipher_setkey(tfm_aes, key, 16); if (result < 0) { printk(KERN_ERR "E: can't set AES key: %d\n", (int)result); goto error_setkey_aes; } for (bitr = 0; bitr < (len + 63) / 64; bitr++) { sfn_le = cpu_to_le64(sfn++); memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* 
n.sfn++... */ result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes, &n, a, b, blen); if (result < 0) goto error_ccm_mac; bytes += result; } result = bytes; error_ccm_mac: error_setkey_aes: crypto_free_cipher(tfm_aes); error_alloc_aes: error_setkey_cbc: crypto_free_blkcipher(tfm_cbc); error_alloc_cbc: d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size, key, _n, a, b, blen, len, (int)bytes); return result; } /* WUSB1.0[A.2] test vectors */ static const u8 stv_hsmic_key[16] = { 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f }; static const struct aes_ccm_nonce stv_hsmic_n = { .sfn = { 0 }, .tkid = { 0x76, 0x98, 0x01, }, .dest_addr = { .data = { 0xbe, 0x00 } }, .src_addr = { .data = { 0x76, 0x98 } }, }; /* * Out-of-band MIC Generation verification code * */ static int wusb_oob_mic_verify(void) { int result; u8 mic[8]; /* WUSB1.0[A.2] test vectors * * Need to keep it in the local stack as GCC 4.1.3something * messes up and generates noise. 
*/ struct usb_handshake stv_hsmic_hs = { .bMessageNumber = 2, .bStatus = 00, .tTKID = { 0x76, 0x98, 0x01 }, .bReserved = 00, .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f }, .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f }, .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c, 0x14, 0x7b } , }; size_t hs_size; result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs); if (result < 0) printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result); else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) { printk(KERN_ERR "E: OOB MIC test: " "mismatch between MIC result and WUSB1.0[A2]\n"); hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); dump_bytes(NULL, &stv_hsmic_hs, hs_size); printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", sizeof(stv_hsmic_n)); dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n)); printk(KERN_ERR "E: MIC out:\n"); dump_bytes(NULL, mic, sizeof(mic)); printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); result = -EINVAL; } else result = 0; return result; } /* * Test vectors for Key derivation * * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1] * (errata corrected in 2005/07). 
*/ static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f }; static const struct aes_ccm_nonce stv_keydvt_n_a1 = { .sfn = { 0 }, .tkid = { 0x76, 0x98, 0x01, }, .dest_addr = { .data = { 0xbe, 0x00 } }, .src_addr = { .data = { 0x76, 0x98 } }, }; static const struct wusb_keydvt_out stv_keydvt_out_a1 = { .kck = { 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f }, .ptk = { 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06, 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d } }; /* * Performa a test to make sure we match the vectors defined in * WUSB1.0[A.1](Errata2006/12) */ static int wusb_key_derive_verify(void) { int result = 0; struct wusb_keydvt_out keydvt_out; /* These come from WUSB1.0[A.1] + 2006/12 errata * NOTE: can't make this const or global -- somehow it seems * the scatterlists for crypto get confused and we get * bad data. There is no doc on this... 
*/ struct wusb_keydvt_in stv_keydvt_in_a1 = { .hnonce = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, .dnonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f } }; result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1, &stv_keydvt_in_a1); if (result < 0) printk(KERN_ERR "E: WUSB key derivation test: " "derivation failed: %d\n", result); if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) { printk(KERN_ERR "E: WUSB key derivation test: " "mismatch between key derivation result " "and WUSB1.0[A1] Errata 2006/12\n"); printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n", sizeof(stv_key_a1)); dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1)); printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n", sizeof(stv_keydvt_n_a1)); dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n", sizeof(stv_keydvt_in_a1)); dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); printk(KERN_ERR "E: keydvt out: KCK\n"); dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck)); printk(KERN_ERR "E: keydvt out: PTK\n"); dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk)); result = -EINVAL; } else result = 0; return result; } /* * Initialize crypto system * * FIXME: we do nothing now, other than verifying. Later on we'll * cache the encryption stuff, so that's why we have a separate init. */ int wusb_crypto_init(void) { int result; result = wusb_key_derive_verify(); if (result < 0) return result; return wusb_oob_mic_verify(); } void wusb_crypto_exit(void) { /* FIXME: free cached crypto transforms */ }
gpl-2.0
focuschou/android_kernel_samsung_piranha
arch/arm/mach-omap2/board-gokey-emif.c
26
2316
/* * LPDDR2 data as per SAMSUNG data sheet * * Copyright (C) 2011 Texas Instruments, Inc. * * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <mach/emif.h> #include "board-gokey.h" static const struct lpddr2_timings gokey_lpddr2_samsung_timings_400_mhz = { .max_freq = 400000000, .RL = 6, .tRPab = 21, .tRCD = 18, .tWR = 15, .tRASmin = 42, .tRRD = 10, .tWTRx2 = 15, .tXSR = 140, .tXPx2 = 15, .tRFCab = 130, .tRTPx2 = 15, .tCKE = 3, .tCKESR = 15, .tZQCS = 90, .tZQCL = 360, .tZQINIT = 1000, .tDQSCKMAXx2 = 11, .tRASmax = 70, .tFAW = 50, }; static const struct lpddr2_timings gokey_lpddr2_samsung_timings_200_mhz = { .max_freq = 200000000, .RL = 3, .tRPab = 21, .tRCD = 18, .tWR = 15, .tRASmin = 42, .tRRD = 10, .tWTRx2 = 20, .tXSR = 140, .tXPx2 = 15, .tRFCab = 130, .tRTPx2 = 15, .tCKE = 3, .tCKESR = 15, .tZQCS = 90, .tZQCL = 360, .tZQINIT = 1000, .tDQSCKMAXx2 = 11, .tRASmax = 70, .tFAW = 50 }; static const struct lpddr2_min_tck gokey_lpddr2_samsung_min_tck = { .tRL = 3, .tRP_AB = 3, .tRCD = 3, .tWR = 3, .tRAS_MIN = 3, .tRRD = 2, .tWTR = 2, .tXP = 2, .tRTP = 2, .tCKE = 3, .tCKESR = 3, .tFAW = 8 }; static struct lpddr2_device_info gokey_lpddr2_samsung_2G_S4_dev = { .device_timings = { &gokey_lpddr2_samsung_timings_200_mhz, &gokey_lpddr2_samsung_timings_400_mhz }, .min_tck = &gokey_lpddr2_samsung_min_tck, .type = LPDDR2_TYPE_S4, .density = LPDDR2_DENSITY_2Gb, .io_width = LPDDR2_IO_WIDTH_32, .emif_ddr_selfrefresh_cycles = 262144, }; /* * LPDDR2 Configeration Data: * The memory organisation is as below : * EMIF1 - CS0 - 2 Gb * CS1 - 2 Gb * EMIF2 - CS0 - 2 Gb * CS1 - 2 Gb * -------------------- * TOTAL - 8 Gb * * Same devices installed on EMIF1 and EMIF2 */ static __initdata struct emif_device_details gokey_emif_devices_qdp = { .cs0_device = 
&gokey_lpddr2_samsung_2G_S4_dev, .cs1_device = &gokey_lpddr2_samsung_2G_S4_dev }; void __init omap4_gokey_emif_init(void) { omap_emif_setup_device_details(&gokey_emif_devices_qdp, &gokey_emif_devices_qdp); }
gpl-2.0
bftg/gcc-5.3.0
gcc/genattr.c
26
14168
/* Generate attribute information (insn-attr.h) from machine description. Copyright (C) 1991-2015 Free Software Foundation, Inc. Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "bconfig.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "rtl.h" #include "errors.h" #include "read-md.h" #include "gensupport.h" static void gen_attr (rtx); static vec<rtx> const_attrs, reservations; static void gen_attr (rtx attr) { const char *p; int is_const = GET_CODE (XEXP (attr, 2)) == CONST; if (is_const) const_attrs.safe_push (attr); printf ("#define HAVE_ATTR_%s 1\n", XSTR (attr, 0)); /* If numeric attribute, don't need to write an enum. */ if (GET_CODE (attr) == DEFINE_ENUM_ATTR) printf ("extern enum %s get_attr_%s (%s);\n\n", XSTR (attr, 1), XSTR (attr, 0), (is_const ? "void" : "rtx_insn *")); else { p = XSTR (attr, 1); if (*p == '\0') printf ("extern int get_attr_%s (%s);\n", XSTR (attr, 0), (is_const ? "void" : "rtx_insn *")); else printf ("extern enum attr_%s get_attr_%s (%s);\n\n", XSTR (attr, 0), XSTR (attr, 0), (is_const ? "void" : "rtx_insn *")); } /* If `length' attribute, write additional function definitions and define variables used by `insn_current_length'. */ if (! 
strcmp (XSTR (attr, 0), "length")) { puts ("\ extern void shorten_branches (rtx_insn *);\n\ extern int insn_default_length (rtx_insn *);\n\ extern int insn_min_length (rtx_insn *);\n\ extern int insn_variable_length_p (rtx_insn *);\n\ extern int insn_current_length (rtx_insn *);\n\n\ #include \"insn-addr.h\"\n"); } } /* Check that attribute NAME is used in define_insn_reservation condition EXP. Return true if it is. */ static bool check_tune_attr (const char *name, rtx exp) { switch (GET_CODE (exp)) { case AND: if (check_tune_attr (name, XEXP (exp, 0))) return true; return check_tune_attr (name, XEXP (exp, 1)); case IOR: return (check_tune_attr (name, XEXP (exp, 0)) && check_tune_attr (name, XEXP (exp, 1))); case EQ_ATTR: return strcmp (XSTR (exp, 0), name) == 0; default: return false; } } /* Try to find a const attribute (usually cpu or tune) that is used in all define_insn_reservation conditions. */ static bool find_tune_attr (rtx exp) { unsigned int i; rtx attr; switch (GET_CODE (exp)) { case AND: case IOR: if (find_tune_attr (XEXP (exp, 0))) return true; return find_tune_attr (XEXP (exp, 1)); case EQ_ATTR: if (strcmp (XSTR (exp, 0), "alternative") == 0) return false; FOR_EACH_VEC_ELT (const_attrs, i, attr) if (strcmp (XSTR (attr, 0), XSTR (exp, 0)) == 0) { unsigned int j; rtx resv; FOR_EACH_VEC_ELT (reservations, j, resv) if (! check_tune_attr (XSTR (attr, 0), XEXP (resv, 2))) return false; return true; } return false; default: return false; } } int main (int argc, char **argv) { rtx desc; int have_delay = 0; int have_annul_true = 0; int have_annul_false = 0; int num_insn_reservations = 0; int i; progname = "genattr"; if (!init_rtx_reader_args (argc, argv)) return (FATAL_EXIT_CODE); puts ("/* Generated automatically by the program `genattr'"); puts (" from the machine description file `md'. */\n"); puts ("#ifndef GCC_INSN_ATTR_H"); puts ("#define GCC_INSN_ATTR_H\n"); puts ("#include \"insn-attr-common.h\"\n"); /* Read the machine description. 
*/ while (1) { int line_no, insn_code_number; desc = read_md_rtx (&line_no, &insn_code_number); if (desc == NULL) break; if (GET_CODE (desc) == DEFINE_ATTR || GET_CODE (desc) == DEFINE_ENUM_ATTR) gen_attr (desc); else if (GET_CODE (desc) == DEFINE_DELAY) { if (! have_delay) { printf ("extern int num_delay_slots (rtx_insn *);\n"); printf ("extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);\n\n"); printf ("extern int const_num_delay_slots (rtx_insn *);\n\n"); have_delay = 1; } for (i = 0; i < XVECLEN (desc, 1); i += 3) { if (XVECEXP (desc, 1, i + 1) && ! have_annul_true) { printf ("#define ANNUL_IFTRUE_SLOTS\n"); printf ("extern int eligible_for_annul_true (rtx_insn *, int, rtx_insn *, int);\n"); have_annul_true = 1; } if (XVECEXP (desc, 1, i + 2) && ! have_annul_false) { printf ("#define ANNUL_IFFALSE_SLOTS\n"); printf ("extern int eligible_for_annul_false (rtx_insn *, int, rtx_insn *, int);\n"); have_annul_false = 1; } } } else if (GET_CODE (desc) == DEFINE_INSN_RESERVATION) { num_insn_reservations++; reservations.safe_push (desc); } } if (num_insn_reservations > 0) { bool has_tune_attr = find_tune_attr (XEXP (reservations[0], 2)); /* Output interface for pipeline hazards recognition based on DFA (deterministic finite state automata. */ printf ("\n/* DFA based pipeline interface. */"); printf ("\n#ifndef AUTOMATON_ALTS\n"); printf ("#define AUTOMATON_ALTS 0\n"); printf ("#endif\n\n"); printf ("\n#ifndef AUTOMATON_STATE_ALTS\n"); printf ("#define AUTOMATON_STATE_ALTS 0\n"); printf ("#endif\n\n"); printf ("#ifndef CPU_UNITS_QUERY\n"); printf ("#define CPU_UNITS_QUERY 0\n"); printf ("#endif\n\n"); /* Interface itself: */ if (has_tune_attr) { printf ("/* Initialize fn pointers for internal_dfa_insn_code\n"); printf (" and insn_default_latency. */\n"); printf ("extern void init_sched_attrs (void);\n\n"); printf ("/* Internal insn code number used by automata. 
*/\n"); printf ("extern int (*internal_dfa_insn_code) (rtx_insn *);\n\n"); printf ("/* Insn latency time defined in define_insn_reservation. */\n"); printf ("extern int (*insn_default_latency) (rtx_insn *);\n\n"); } else { printf ("#define init_sched_attrs() do { } while (0)\n\n"); printf ("/* Internal insn code number used by automata. */\n"); printf ("extern int internal_dfa_insn_code (rtx_insn *);\n\n"); printf ("/* Insn latency time defined in define_insn_reservation. */\n"); printf ("extern int insn_default_latency (rtx_insn *);\n\n"); } printf ("/* Return nonzero if there is a bypass for given insn\n"); printf (" which is a data producer. */\n"); printf ("extern int bypass_p (rtx_insn *);\n\n"); printf ("/* Insn latency time on data consumed by the 2nd insn.\n"); printf (" Use the function if bypass_p returns nonzero for\n"); printf (" the 1st insn. */\n"); printf ("extern int insn_latency (rtx, rtx);\n\n"); printf ("/* Maximal insn latency time possible of all bypasses for this insn.\n"); printf (" Use the function if bypass_p returns nonzero for\n"); printf (" the 1st insn. */\n"); printf ("extern int maximal_insn_latency (rtx);\n\n"); printf ("\n#if AUTOMATON_ALTS\n"); printf ("/* The following function returns number of alternative\n"); printf (" reservations of given insn. It may be used for better\n"); printf (" insns scheduling heuristics. */\n"); printf ("extern int insn_alts (rtx);\n\n"); printf ("#endif\n\n"); printf ("/* Maximal possible number of insns waiting results being\n"); printf (" produced by insns whose execution is not finished. */\n"); printf ("extern const int max_insn_queue_index;\n\n"); printf ("/* Pointer to data describing current state of DFA. */\n"); printf ("typedef void *state_t;\n\n"); printf ("/* Size of the data in bytes. */\n"); printf ("extern int state_size (void);\n\n"); printf ("/* Initiate given DFA state, i.e. Set up the state\n"); printf (" as all functional units were not reserved. 
*/\n"); printf ("extern void state_reset (state_t);\n"); printf ("/* The following function returns negative value if given\n"); printf (" insn can be issued in processor state described by given\n"); printf (" DFA state. In this case, the DFA state is changed to\n"); printf (" reflect the current and future reservations by given\n"); printf (" insn. Otherwise the function returns minimal time\n"); printf (" delay to issue the insn. This delay may be zero\n"); printf (" for superscalar or VLIW processors. If the second\n"); printf (" parameter is NULL the function changes given DFA state\n"); printf (" as new processor cycle started. */\n"); printf ("extern int state_transition (state_t, rtx);\n"); printf ("\n#if AUTOMATON_STATE_ALTS\n"); printf ("/* The following function returns number of possible\n"); printf (" alternative reservations of given insn in given\n"); printf (" DFA state. It may be used for better insns scheduling\n"); printf (" heuristics. By default the function is defined if\n"); printf (" macro AUTOMATON_STATE_ALTS is defined because its\n"); printf (" implementation may require much memory. */\n"); printf ("extern int state_alts (state_t, rtx);\n"); printf ("#endif\n\n"); printf ("extern int min_issue_delay (state_t, rtx_insn *);\n"); printf ("/* The following function returns nonzero if no one insn\n"); printf (" can be issued in current DFA state. */\n"); printf ("extern int state_dead_lock_p (state_t);\n"); printf ("/* The function returns minimal delay of issue of the 2nd\n"); printf (" insn after issuing the 1st insn in given DFA state.\n"); printf (" The 1st insn should be issued in given state (i.e.\n"); printf (" state_transition should return negative value for\n"); printf (" the insn and the state). Data dependencies between\n"); printf (" the insns are ignored by the function. 
*/\n"); printf ("extern int min_insn_conflict_delay (state_t, rtx, rtx);\n"); printf ("/* The following function outputs reservations for given\n"); printf (" insn as they are described in the corresponding\n"); printf (" define_insn_reservation. */\n"); printf ("extern void print_reservation (FILE *, rtx_insn *);\n"); printf ("\n#if CPU_UNITS_QUERY\n"); printf ("/* The following function returns code of functional unit\n"); printf (" with given name (see define_cpu_unit). */\n"); printf ("extern int get_cpu_unit_code (const char *);\n"); printf ("/* The following function returns nonzero if functional\n"); printf (" unit with given code is currently reserved in given\n"); printf (" DFA state. */\n"); printf ("extern int cpu_unit_reservation_p (state_t, int);\n"); printf ("#endif\n\n"); printf ("/* The following function returns true if insn\n"); printf (" has a dfa reservation. */\n"); printf ("extern bool insn_has_dfa_reservation_p (rtx_insn *);\n\n"); printf ("/* Clean insn code cache. It should be called if there\n"); printf (" is a chance that condition value in a\n"); printf (" define_insn_reservation will be changed after\n"); printf (" last call of dfa_start. */\n"); printf ("extern void dfa_clean_insn_cache (void);\n\n"); printf ("extern void dfa_clear_single_insn_cache (rtx_insn *);\n\n"); printf ("/* Initiate and finish work with DFA. They should be\n"); printf (" called as the first and the last interface\n"); printf (" functions. */\n"); printf ("extern void dfa_start (void);\n"); printf ("extern void dfa_finish (void);\n"); } else { /* Otherwise we do no scheduling, but we need these typedefs in order to avoid uglifying other code with more ifdefs. */ printf ("typedef void *state_t;\n\n"); } /* Special-purpose attributes should be tested with if, not #ifdef. 
*/ const char * const special_attrs[] = { "length", "enabled", "preferred_for_size", "preferred_for_speed", 0 }; for (const char * const *p = special_attrs; *p; p++) { printf ("#ifndef HAVE_ATTR_%s\n" "#define HAVE_ATTR_%s 0\n" "#endif\n", *p, *p); } /* We make an exception here to provide stub definitions for insn_*_length* / get_attr_enabled functions. */ puts ("#if !HAVE_ATTR_length\n" "extern int hook_int_rtx_insn_unreachable (rtx_insn *);\n" "#define insn_default_length hook_int_rtx_insn_unreachable\n" "#define insn_min_length hook_int_rtx_insn_unreachable\n" "#define insn_variable_length_p hook_int_rtx_insn_unreachable\n" "#define insn_current_length hook_int_rtx_insn_unreachable\n" "#include \"insn-addr.h\"\n" "#endif\n" "extern int hook_int_rtx_1 (rtx);\n" "#if !HAVE_ATTR_enabled\n" "#define get_attr_enabled hook_int_rtx_1\n" "#endif\n" "#if !HAVE_ATTR_preferred_for_size\n" "#define get_attr_preferred_for_size hook_int_rtx_1\n" "#endif\n" "#if !HAVE_ATTR_preferred_for_speed\n" "#define get_attr_preferred_for_speed hook_int_rtx_1\n" "#endif\n"); /* Output flag masks for use by reorg. Flags are used to hold branch direction for use by eligible_for_... */ printf ("\n#define ATTR_FLAG_forward\t0x1\n"); printf ("#define ATTR_FLAG_backward\t0x2\n"); puts ("\n#endif /* GCC_INSN_ATTR_H */"); if (ferror (stdout) || fflush (stdout) || fclose (stdout)) return FATAL_EXIT_CODE; return SUCCESS_EXIT_CODE; }
gpl-2.0
jds2001/qemu-kvm
hw/exynos4_boards.c
26
5908
/* * Samsung exynos4 SoC based boards emulation * * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved. * Maksim Kozlov <m.kozlov@samsung.com> * Evgeny Voevodin <e.voevodin@samsung.com> * Igor Mitsyanko <i.mitsyanko@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * */ #include "sysemu.h" #include "sysbus.h" #include "net.h" #include "arm-misc.h" #include "exec-memory.h" #include "exynos4210.h" #include "boards.h" #undef DEBUG //#define DEBUG #ifdef DEBUG #undef PRINT_DEBUG #define PRINT_DEBUG(fmt, args...) \ do { \ fprintf(stderr, " [%s:%d] "fmt, __func__, __LINE__, ##args); \ } while (0) #else #define PRINT_DEBUG(fmt, args...) 
do {} while (0) #endif #define SMDK_LAN9118_BASE_ADDR 0x05000000 typedef enum Exynos4BoardType { EXYNOS4_BOARD_NURI, EXYNOS4_BOARD_SMDKC210, EXYNOS4_NUM_OF_BOARDS } Exynos4BoardType; static int exynos4_board_id[EXYNOS4_NUM_OF_BOARDS] = { [EXYNOS4_BOARD_NURI] = 0xD33, [EXYNOS4_BOARD_SMDKC210] = 0xB16, }; static int exynos4_board_smp_bootreg_addr[EXYNOS4_NUM_OF_BOARDS] = { [EXYNOS4_BOARD_NURI] = EXYNOS4210_SECOND_CPU_BOOTREG, [EXYNOS4_BOARD_SMDKC210] = EXYNOS4210_SECOND_CPU_BOOTREG, }; static unsigned long exynos4_board_ram_size[EXYNOS4_NUM_OF_BOARDS] = { [EXYNOS4_BOARD_NURI] = 0x40000000, [EXYNOS4_BOARD_SMDKC210] = 0x40000000, }; static struct arm_boot_info exynos4_board_binfo = { .loader_start = EXYNOS4210_BASE_BOOT_ADDR, .smp_loader_start = EXYNOS4210_SMP_BOOT_ADDR, .nb_cpus = EXYNOS4210_NCPUS, .write_secondary_boot = exynos4210_write_secondary, }; static QEMUMachine exynos4_machines[EXYNOS4_NUM_OF_BOARDS]; static void lan9215_init(uint32_t base, qemu_irq irq) { DeviceState *dev; SysBusDevice *s; /* This should be a 9215 but the 9118 is close enough */ if (nd_table[0].used) { qemu_check_nic_model(&nd_table[0], "lan9118"); dev = qdev_create(NULL, "lan9118"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint32(dev, "mode_16bit", 1); qdev_init_nofail(dev); s = sysbus_from_qdev(dev); sysbus_mmio_map(s, 0, base); sysbus_connect_irq(s, 0, irq); } } static Exynos4210State *exynos4_boards_init_common( const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, Exynos4BoardType board_type) { if (smp_cpus != EXYNOS4210_NCPUS) { fprintf(stderr, "%s board supports only %d CPU cores. 
Ignoring smp_cpus" " value.\n", exynos4_machines[board_type].name, exynos4_machines[board_type].max_cpus); } exynos4_board_binfo.ram_size = exynos4_board_ram_size[board_type]; exynos4_board_binfo.board_id = exynos4_board_id[board_type]; exynos4_board_binfo.smp_bootreg_addr = exynos4_board_smp_bootreg_addr[board_type]; exynos4_board_binfo.kernel_filename = kernel_filename; exynos4_board_binfo.initrd_filename = initrd_filename; exynos4_board_binfo.kernel_cmdline = kernel_cmdline; exynos4_board_binfo.gic_cpu_if_addr = EXYNOS4210_SMP_PRIVATE_BASE_ADDR + 0x100; PRINT_DEBUG("\n ram_size: %luMiB [0x%08lx]\n" " kernel_filename: %s\n" " kernel_cmdline: %s\n" " initrd_filename: %s\n", exynos4_board_ram_size[board_type] / 1048576, exynos4_board_ram_size[board_type], kernel_filename, kernel_cmdline, initrd_filename); return exynos4210_init(get_system_memory(), exynos4_board_ram_size[board_type]); } static void nuri_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { exynos4_boards_init_common(kernel_filename, kernel_cmdline, initrd_filename, EXYNOS4_BOARD_NURI); arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); } static void smdkc210_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { Exynos4210State *s = exynos4_boards_init_common(kernel_filename, kernel_cmdline, initrd_filename, EXYNOS4_BOARD_SMDKC210); lan9215_init(SMDK_LAN9118_BASE_ADDR, qemu_irq_invert(s->irq_table[exynos4210_get_irq(37, 1)])); arm_load_kernel(arm_env_get_cpu(first_cpu), &exynos4_board_binfo); } static QEMUMachine exynos4_machines[EXYNOS4_NUM_OF_BOARDS] = { [EXYNOS4_BOARD_NURI] = { .name = "nuri", .desc = "Samsung NURI board (Exynos4210)", .init = nuri_init, .max_cpus = EXYNOS4210_NCPUS, }, [EXYNOS4_BOARD_SMDKC210] = { .name = "smdkc210", .desc = "Samsung SMDKC210 board 
(Exynos4210)", .init = smdkc210_init, .max_cpus = EXYNOS4210_NCPUS, }, }; static void exynos4_machine_init(void) { qemu_register_machine(&exynos4_machines[EXYNOS4_BOARD_NURI]); qemu_register_machine(&exynos4_machines[EXYNOS4_BOARD_SMDKC210]); } machine_init(exynos4_machine_init);
gpl-2.0
iwinoto/v4l-media_build
media/drivers/staging/rtl8723au/os_dep/os_intfs.c
26
28190
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _OS_INTFS_C_ #include <osdep_service.h> #include <drv_types.h> #include <xmit_osdep.h> #include <recv_osdep.h> #include <hal_intf.h> #include <rtw_version.h> #include <ethernet.h> #include <usb_osintf.h> #include <linux/version.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek Wireless Lan Driver"); MODULE_AUTHOR("Realtek Semiconductor Corp."); MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); MODULE_VERSION(DRIVERVERSION); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); /* module param defaults */ static int rtw_chip_version = 0x00; static int rtw_rfintfs = HWPI; static int rtw_debug = 1; static int rtw_channel = 1;/* ad-hoc support requirement */ static int rtw_wireless_mode = WIRELESS_11BG_24N; static int rtw_vrtl_carrier_sense = AUTO_VCS; static int rtw_vcs_type = RTS_CTS;/* */ static int rtw_rts_thresh = 2347;/* */ static int rtw_frag_thresh = 2346;/* */ static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */ static int rtw_scan_mode = 1;/* active, passive */ static int rtw_adhoc_tx_pwr = 1; static int rtw_soft_ap; static int rtw_power_mgnt = 1; static int rtw_ips_mode = IPS_NORMAL; static int rtw_smart_ps = 2; module_param(rtw_ips_mode, int, 0644); MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode"); static int 
rtw_long_retry_lmt = 7; static int rtw_short_retry_lmt = 7; static int rtw_busy_thresh = 40; static int rtw_ack_policy = NORMAL_ACK; static int rtw_acm_method;/* 0:By SW 1:By HW. */ static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */ static int rtw_uapsd_enable; int rtw_ht_enable23A = 1; /* 0 :diable, bit(0): enable 2.4g, bit(1): enable 5g */ int rtw_cbw40_enable23A = 3; int rtw_ampdu_enable23A = 1;/* for enable tx_ampdu */ /* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable * 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */ static int rtw_rx_stbc = 1; static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */ /* Use 2 path Tx to transmit MCS0~7 and legacy mode */ static int rtw_lowrate_two_xmit = 1; /* int rf_config = RF_1T2R; 1T2R */ static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */ static int rtw_low_power; static int rtw_wifi_spec; static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX; #ifdef CONFIG_8723AU_BT_COEXIST static int rtw_btcoex_enable = 1; static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, 5.OtherBusy */ static int rtw_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ static int rtw_bt_ampdu = 1 ; #endif /* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. 
*/ static int rtw_AcceptAddbaReq = true; static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */ static int rtw_antdiv_type; /* 0:decide by efuse */ static int rtw_enusbss;/* 0:disable, 1:enable */ static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */ static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */ static int rtw_hw_wps_pbc = 1; static int rtw_80211d; static int rtw_regulatory_id = 0xff;/* Regulatory tab id, 0xff = follow efuse's setting */ module_param(rtw_regulatory_id, int, 0644); static char *ifname = "wlan%d"; module_param(ifname, charp, 0644); MODULE_PARM_DESC(ifname, "The default name to allocate for first interface"); static char *if2name = "wlan%d"; module_param(if2name, charp, 0644); MODULE_PARM_DESC(if2name, "The default name to allocate for second interface"); module_param(rtw_channel_plan, int, 0644); module_param(rtw_chip_version, int, 0644); module_param(rtw_rfintfs, int, 0644); module_param(rtw_channel, int, 0644); module_param(rtw_wmm_enable, int, 0644); module_param(rtw_vrtl_carrier_sense, int, 0644); module_param(rtw_vcs_type, int, 0644); module_param(rtw_busy_thresh, int, 0644); module_param(rtw_ht_enable23A, int, 0644); module_param(rtw_cbw40_enable23A, int, 0644); module_param(rtw_ampdu_enable23A, int, 0644); module_param(rtw_rx_stbc, int, 0644); module_param(rtw_ampdu_amsdu, int, 0644); module_param(rtw_lowrate_two_xmit, int, 0644); module_param(rtw_rf_config, int, 0644); module_param(rtw_power_mgnt, int, 0644); module_param(rtw_smart_ps, int, 0644); module_param(rtw_low_power, int, 0644); module_param(rtw_wifi_spec, int, 0644); module_param(rtw_antdiv_cfg, int, 0644); module_param(rtw_enusbss, int, 0644); module_param(rtw_hwpdn_mode, int, 0644); module_param(rtw_hwpwrp_detect, int, 0644); module_param(rtw_hw_wps_pbc, int, 0644); static uint rtw_max_roaming_times = 2; module_param(rtw_max_roaming_times, uint, 0644); MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming 
times to try"); module_param(rtw_80211d, int, 0644); MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism"); #ifdef CONFIG_8723AU_BT_COEXIST module_param(rtw_btcoex_enable, int, 0644); MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism"); #endif static uint rtw_notch_filter; module_param(rtw_notch_filter, uint, 0644); MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P"); module_param_named(debug, rtw_debug, int, 0444); MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)"); static int netdev_close(struct net_device *pnetdev); static uint loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev) { struct registry_priv *registry_par = &padapter->registrypriv; uint status = _SUCCESS; GlobalDebugLevel23A = rtw_debug; registry_par->chip_version = (u8)rtw_chip_version; registry_par->rfintfs = (u8)rtw_rfintfs; memcpy(registry_par->ssid.ssid, "ANY", 3); registry_par->ssid.ssid_len = 3; registry_par->channel = (u8)rtw_channel; registry_par->wireless_mode = (u8)rtw_wireless_mode; registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense; registry_par->vcs_type = (u8)rtw_vcs_type; registry_par->rts_thresh = (u16)rtw_rts_thresh; registry_par->frag_thresh = (u16)rtw_frag_thresh; registry_par->preamble = (u8)rtw_preamble; registry_par->scan_mode = (u8)rtw_scan_mode; registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr; registry_par->soft_ap = (u8)rtw_soft_ap; registry_par->smart_ps = (u8)rtw_smart_ps; registry_par->power_mgnt = (u8)rtw_power_mgnt; registry_par->ips_mode = (u8)rtw_ips_mode; registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt; registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt; registry_par->busy_thresh = (u16)rtw_busy_thresh; registry_par->ack_policy = (u8)rtw_ack_policy; registry_par->acm_method = (u8)rtw_acm_method; /* UAPSD */ registry_par->wmm_enable = (u8)rtw_wmm_enable; registry_par->uapsd_enable = (u8)rtw_uapsd_enable; registry_par->ht_enable = (u8)rtw_ht_enable23A; 
registry_par->cbw40_enable = (u8)rtw_cbw40_enable23A; registry_par->ampdu_enable = (u8)rtw_ampdu_enable23A; registry_par->rx_stbc = (u8)rtw_rx_stbc; registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu; registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit; registry_par->rf_config = (u8)rtw_rf_config; registry_par->low_power = (u8)rtw_low_power; registry_par->wifi_spec = (u8)rtw_wifi_spec; registry_par->channel_plan = (u8)rtw_channel_plan; #ifdef CONFIG_8723AU_BT_COEXIST registry_par->btcoex = (u8)rtw_btcoex_enable; registry_par->bt_iso = (u8)rtw_bt_iso; registry_par->bt_sco = (u8)rtw_bt_sco; registry_par->bt_ampdu = (u8)rtw_bt_ampdu; #endif registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq; registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg; registry_par->antdiv_type = (u8)rtw_antdiv_type; /* 0:disable, 1:enable, 2:by EFUSE config */ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode; /* 0:disable, 1:enable */ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect; registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc; registry_par->max_roaming_times = (u8)rtw_max_roaming_times; registry_par->enable80211d = (u8)rtw_80211d; snprintf(registry_par->ifname, 16, "%s", ifname); snprintf(registry_par->if2name, 16, "%s", if2name); registry_par->notch_filter = (u8)rtw_notch_filter; registry_par->regulatory_tid = (u8)rtw_regulatory_id; return status; } static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct sockaddr *addr = p; if (!padapter->bup) ether_addr_copy(padapter->eeprompriv.mac_addr, addr->sa_data); return 0; } static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct recv_priv *precvpriv = &padapter->recvpriv; padapter->stats.tx_packets = pxmitpriv->tx_pkts; padapter->stats.rx_packets = precvpriv->rx_pkts; padapter->stats.tx_dropped = pxmitpriv->tx_drop; 
padapter->stats.rx_dropped = precvpriv->rx_drop; padapter->stats.tx_bytes = pxmitpriv->tx_bytes; padapter->stats.rx_bytes = precvpriv->rx_bytes; return &padapter->stats; } /* * AC to queue mapping * * AC_VO -> queue 0 * AC_VI -> queue 1 * AC_BE -> queue 2 * AC_BK -> queue 3 */ static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; /* Given a data frame determine the 802.1p/1d tag to use. */ static unsigned int rtw_classify8021d(struct sk_buff *skb) { unsigned int dscp; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. */ if (skb->priority >= 256 && skb->priority <= 263) return skb->priority - 256; switch (skb->protocol) { case htons(ETH_P_IP): dscp = ip_hdr(skb)->tos & 0xfc; break; default: return 0; } return dscp >> 5; } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct rtw_adapter *padapter = netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; skb->priority = rtw_classify8021d(skb); if (pmlmepriv->acm_mask != 0) skb->priority = qos_acm23a(pmlmepriv->acm_mask, skb->priority); return rtw_1d_to_queue[skb->priority]; } u16 rtw_recv_select_queue23a(struct sk_buff *skb) { struct iphdr *piphdr; unsigned int dscp; u16 eth_type; u32 priority; u8 *pdata = skb->data; memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2); switch (eth_type) { case htons(ETH_P_IP): piphdr = (struct iphdr *)(pdata + ETH_HLEN); dscp = piphdr->tos & 0xfc; priority = dscp >> 5; break; default: priority = 0; } return rtw_1d_to_queue[priority]; } static const struct net_device_ops rtw_netdev_ops = { .ndo_open = netdev_open23a, .ndo_stop = netdev_close, .ndo_start_xmit = rtw_xmit23a_entry23a, .ndo_select_queue = rtw_select_queue, .ndo_set_mac_address = rtw_net_set_mac_address, .ndo_get_stats = rtw_net_get_stats, }; int rtw_init_netdev23a_name23a(struct 
net_device *pnetdev, const char *ifname) { if (dev_alloc_name(pnetdev, ifname) < 0) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n")); } netif_carrier_off(pnetdev); return 0; } static const struct device_type wlan_type = { .name = "wlan", }; struct net_device *rtw_init_netdev23a(struct rtw_adapter *old_padapter) { struct rtw_adapter *padapter; struct net_device *pnetdev; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n")); pnetdev = alloc_etherdev_mq(sizeof(struct rtw_adapter), 4); if (!pnetdev) return NULL; pnetdev->dev.type = &wlan_type; padapter = netdev_priv(pnetdev); padapter->pnetdev = pnetdev; DBG_8723A("register rtw_netdev_ops to netdev_ops\n"); pnetdev->netdev_ops = &rtw_netdev_ops; pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */ /* step 2. */ loadparam(padapter, pnetdev); return pnetdev; } u32 rtw_start_drv_threads23a(struct rtw_adapter *padapter) { u32 _status = _SUCCESS; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads23a\n")); padapter->cmdThread = kthread_run(rtw_cmd_thread23a, padapter, "RTW_CMD_THREAD"); if (IS_ERR(padapter->cmdThread)) { _status = _FAIL; } else { /* wait for cmd_thread to run */ down(&padapter->cmdpriv.terminate_cmdthread_sema); } rtw_hal_start_thread23a(padapter); return _status; } void rtw_stop_drv_threads23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads23a\n")); /* Below is to termindate rtw_cmd_thread23a & event_thread... 
*/ up(&padapter->cmdpriv.cmd_queue_sema); if (padapter->cmdThread) down(&padapter->cmdpriv.terminate_cmdthread_sema); rtw_hal_stop_thread23a(padapter); } static u8 rtw_init_default_value(struct rtw_adapter *padapter) { struct registry_priv *pregistrypriv = &padapter->registrypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; u8 ret = _SUCCESS; /* xmit_priv */ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense; pxmitpriv->vcs = pregistrypriv->vcs_type; pxmitpriv->vcs_type = pregistrypriv->vcs_type; /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */ pxmitpriv->frag_len = pregistrypriv->frag_thresh; /* mlme_priv */ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ pmlmepriv->scan_mode = SCAN_ACTIVE; /* ht_priv */ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */ /* security_priv */ psecuritypriv->binstallGrpkey = _FAIL; /* open system */ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_; psecuritypriv->dot11PrivacyKeyIndex = 0; psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_; psecuritypriv->dot118021XGrpKeyid = 1; psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen; psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled; /* registry_priv */ rtw_init_registrypriv_dev_network23a(padapter); rtw_update_registrypriv_dev_network23a(padapter); /* hal_priv */ rtw_hal_def_value_init23a(padapter); /* misc. 
*/ padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; padapter->bRxRSSIDisplay = 0; padapter->bNotifyChannelChange = 0; #ifdef CONFIG_8723AU_P2P padapter->bShowGetP2PState = 1; #endif return ret; } u8 rtw_reset_drv_sw23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; u8 ret8 = _SUCCESS; /* hal_priv */ rtw_hal_def_value_init23a(padapter); padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; padapter->bRxRSSIDisplay = 0; pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ padapter->xmitpriv.tx_pkts = 0; padapter->recvpriv.rx_pkts = 0; pmlmepriv->LinkDetectInfo.bBusyTraffic = false; _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING); rtw_hal_sreset_reset23a_value23a(padapter); pwrctrlpriv->pwr_state_check_cnts = 0; /* mlmeextpriv */ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE; rtw_set_signal_stat_timer(&padapter->recvpriv); return ret8; } u8 rtw_init_drv_sw23a(struct rtw_adapter *padapter) { u8 ret8 = _SUCCESS; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw23a\n")); if ((rtw_init_cmd_priv23a(&padapter->cmdpriv)) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n")); ret8 = _FAIL; goto exit; } padapter->cmdpriv.padapter = padapter; if (rtw_init_evt_priv23a(&padapter->evtpriv) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n")); ret8 = _FAIL; goto exit; } if (rtw_init_mlme_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n")); ret8 = _FAIL; goto exit; } #ifdef CONFIG_8723AU_P2P rtw_init_wifidirect_timers23a(padapter); init_wifidirect_info23a(padapter, P2P_ROLE_DISABLE); reset_global_wifidirect_info23a(padapter); rtw_init_cfg80211_wifidirect_info(padapter); #ifdef CONFIG_8723AU_P2P if (rtw_init_wifi_display_info(padapter) == _FAIL) RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't 
init init_wifi_display_info\n")); #endif #endif /* CONFIG_8723AU_P2P */ if (init_mlme_ext_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n")); ret8 = _FAIL; goto exit; } if (_rtw_init_xmit_priv23a(&padapter->xmitpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_xmit_priv23a\n"); ret8 = _FAIL; goto exit; } if (_rtw_init_recv_priv23a(&padapter->recvpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_recv_priv23a\n"); ret8 = _FAIL; goto exit; } if (_rtw_init_sta_priv23a(&padapter->stapriv) == _FAIL) { DBG_8723A("Can't _rtw_init_sta_priv23a\n"); ret8 = _FAIL; goto exit; } padapter->stapriv.padapter = padapter; padapter->setband = GHZ24_50; rtw_init_bcmc_stainfo23a(padapter); rtw_init_pwrctrl_priv23a(padapter); ret8 = rtw_init_default_value(padapter); rtw_hal_dm_init23a(padapter); rtw_hal_sw_led_init23a(padapter); rtw_hal_sreset_init23a(padapter); exit: RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw23a\n")); return ret8; } void rtw_cancel_all_timer23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer23a\n")); del_timer_sync(&padapter->mlmepriv.assoc_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel association timer complete!\n")); del_timer_sync(&padapter->mlmepriv.scan_to_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel scan_to_timer!\n")); del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel dynamic_chk_timer!\n")); /* cancel sw led timer */ rtw_hal_sw_led_deinit23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel DeInitSwLeds!\n")); del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer); #ifdef CONFIG_8723AU_P2P del_timer_sync(&padapter->cfg80211_wdinfo.remain_on_ch_timer); #endif /* CONFIG_8723AU_P2P */ del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer); 
rtw_clear_scan_deny(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer23a:cancel set_scan_deny_timer!\n")); del_timer_sync(&padapter->recvpriv.signal_stat_timer); /* cancel dm timer */ rtw_hal_dm_deinit23a(padapter); } u8 rtw_free_drv_sw23a(struct rtw_adapter *padapter) { #ifdef CONFIG_8723AU_P2P struct wifidirect_info *pwdinfo; #endif RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw23a")); /* we can call rtw_p2p_enable23a here, but: * 1. rtw_p2p_enable23a may have IO operation * 2. rtw_p2p_enable23a is bundled with wext interface */ #ifdef CONFIG_8723AU_P2P pwdinfo = &padapter->wdinfo; if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) { del_timer_sync(&pwdinfo->find_phase_timer); del_timer_sync(&pwdinfo->restore_p2p_state_timer); del_timer_sync(&pwdinfo->pre_tx_scan_timer); rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE); } #endif free_mlme_ext_priv23a(&padapter->mlmeextpriv); rtw_free_cmd_priv23a(&padapter->cmdpriv); rtw_free_evt_priv23a(&padapter->evtpriv); rtw_free_mlme_priv23a(&padapter->mlmepriv); _rtw_free_xmit_priv23a(&padapter->xmitpriv); _rtw_free_sta_priv23a(&padapter->stapriv);/* will free bcmc_stainfo here */ _rtw_free_recv_priv23a(&padapter->recvpriv); rtw_free_pwrctrl_priv(padapter); rtw_hal_free_data23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw23a\n")); /* free the old_pnetdev */ if (padapter->rereg_nd_name_priv.old_pnetdev) { free_netdev(padapter->rereg_nd_name_priv.old_pnetdev); padapter->rereg_nd_name_priv.old_pnetdev = NULL; } /* clear pbuddy_adapter to avoid access wrong pointer. 
*/ if (padapter->pbuddy_adapter != NULL) padapter->pbuddy_adapter->pbuddy_adapter = NULL; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw23a\n")); return _SUCCESS; } static int _rtw_drv_register_netdev(struct rtw_adapter *padapter, char *name) { struct net_device *pnetdev = padapter->pnetdev; int ret = _SUCCESS; /* alloc netdev name */ rtw_init_netdev23a_name23a(pnetdev, name); ether_addr_copy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr); /* Tell the network stack we exist */ if (register_netdev(pnetdev)) { DBG_8723A(FUNC_NDEV_FMT "Failed!\n", FUNC_NDEV_ARG(pnetdev)); ret = _FAIL; goto error_register_netdev; } DBG_8723A("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr)); return ret; error_register_netdev: if (padapter->iface_id > IFACE_ID0) { rtw_free_drv_sw23a(padapter); free_netdev(pnetdev); } return ret; } int rtw_drv_register_netdev(struct rtw_adapter *if1) { struct dvobj_priv *dvobj = if1->dvobj; int i, status = _SUCCESS; if (dvobj->iface_nums < IFACE_ID_MAX) { for (i = 0; i < dvobj->iface_nums; i++) { struct rtw_adapter *padapter = dvobj->padapters[i]; if (padapter) { char *name; if (padapter->iface_id == IFACE_ID0) name = if1->registrypriv.ifname; else if (padapter->iface_id == IFACE_ID1) name = if1->registrypriv.if2name; else name = "wlan%d"; status = _rtw_drv_register_netdev(padapter, name); if (status != _SUCCESS) break; } } } return status; } int netdev_open23a(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct pwrctrl_priv *pwrctrlpriv; int ret = 0; uint status; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n")); DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup); mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex); pwrctrlpriv = &padapter->pwrctrlpriv; if (pwrctrlpriv->ps_flag) { padapter->net_closed = false; goto netdev_open23a_normal_process; } if (!padapter->bup) { padapter->bDriverStopped = false; 
padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtw_hal_init23a(padapter); if (status == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl871x_hal_init(): Can't init h/w!\n")); goto netdev_open23a_error; } DBG_8723A("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr)); status = rtw_start_drv_threads23a(padapter); if (status == _FAIL) { DBG_8723A("Initialize driver software resource Failed!\n"); goto netdev_open23a_error; } if (init_hw_mlme_ext23a(padapter) == _FAIL) { DBG_8723A("can't init mlme_ext_priv\n"); goto netdev_open23a_error; } if (padapter->intf_start) padapter->intf_start(padapter); rtw_cfg80211_init_wiphy(padapter); rtw_led_control(padapter, LED_CTL_NO_LINK); padapter->bup = true; } padapter->net_closed = false; mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(2000)); padapter->pwrctrlpriv.bips_processing = false; rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); /* netif_carrier_on(pnetdev);call this func when rtw23a_joinbss_event_cb return success */ if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_start_all_queues(pnetdev); else netif_tx_wake_all_queues(pnetdev); netdev_open23a_normal_process: RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n")); DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup); exit: mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex); return ret; netdev_open23a_error: padapter->bup = false; netif_carrier_off(pnetdev); netif_tx_stop_all_queues(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-871x_drv - dev_open, fail!\n")); DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup); ret = -1; goto exit; } static int ips_netdrv_open(struct rtw_adapter *padapter) { int status = _SUCCESS; padapter->net_closed = false; DBG_8723A("===> %s.........\n", __func__); padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtw_hal_init23a(padapter); if (status == 
_FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n")); goto netdev_open23a_error; } if (padapter->intf_start) padapter->intf_start(padapter); rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(5000)); return _SUCCESS; netdev_open23a_error: /* padapter->bup = false; */ DBG_8723A("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup); return _FAIL; } int rtw_ips_pwr_up23a(struct rtw_adapter *padapter) { int result; unsigned long start_time = jiffies; DBG_8723A("===> rtw_ips_pwr_up23a..............\n"); rtw_reset_drv_sw23a(padapter); result = ips_netdrv_open(padapter); rtw_led_control(padapter, LED_CTL_NO_LINK); DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n", jiffies_to_msecs(jiffies - start_time)); return result; } void rtw_ips_pwr_down23a(struct rtw_adapter *padapter) { unsigned long start_time = jiffies; DBG_8723A("===> rtw_ips_pwr_down23a...................\n"); padapter->bCardDisableWOHSM = true; padapter->net_closed = true; rtw_led_control(padapter, LED_CTL_POWER_OFF); rtw_ips_dev_unload23a(padapter); padapter->bCardDisableWOHSM = false; DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n", jiffies_to_msecs(jiffies - start_time)); } void rtw_ips_dev_unload23a(struct rtw_adapter *padapter) { rtw_hal_set_hwreg23a(padapter, HW_VAR_FIFO_CLEARN_UP, NULL); if (padapter->intf_stop) padapter->intf_stop(padapter); /* s5. */ if (!padapter->bSurpriseRemoved) rtw_hal_deinit23a(padapter); } int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal) { int status; if (bnormal) status = netdev_open23a(pnetdev); else status = (_SUCCESS == ips_netdrv_open(netdev_priv(pnetdev))) ? 
(0) : (-1); return status; } static int netdev_close(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n")); if (padapter->pwrctrlpriv.bInternalAutoSuspend) { if (padapter->pwrctrlpriv.rf_pwrstate == rf_off) padapter->pwrctrlpriv.ps_flag = true; } padapter->net_closed = true; if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) { DBG_8723A("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed); /* s1. */ if (pnetdev) { if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_stop_all_queues(pnetdev); } /* s2. */ LeaveAllPowerSaveMode23a(padapter); rtw_disassoc_cmd23a(padapter, 500, false); /* s2-2. indicate disconnect to os */ rtw_indicate_disconnect23a(padapter); /* s2-3. */ rtw_free_assoc_resources23a(padapter, 1); /* s2-4. */ rtw_free_network_queue23a(padapter, true); /* Close LED */ rtw_led_control(padapter, LED_CTL_POWER_OFF); } #ifdef CONFIG_8723AU_P2P if (wdev_to_priv(padapter->rtw_wdev)->p2p_enabled) wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = false; rtw_p2p_enable23a(padapter, P2P_ROLE_DISABLE); #endif /* CONFIG_8723AU_P2P */ rtw_scan_abort23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); return 0; } void rtw_ndev_destructor(struct net_device *ndev) { DBG_8723A(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev)); kfree(ndev->ieee80211_ptr); free_netdev(ndev); }
gpl-2.0
systembugtj/playasa
src/Thirdparty/openssl-0.9.8x/crypto/pkcs7/pk7_mime.c
26
4011
/* pk7_mime.c */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project. */ /* ==================================================================== * Copyright (c) 1999-2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * */ #include <stdio.h> #include <ctype.h> #include "cryptlib.h" #include <openssl/rand.h> #include <openssl/x509.h> #include <openssl/asn1.h> /* PKCS#7 wrappers round generalised MIME routines */ PKCS7 *SMIME_read_PKCS7(BIO *bio, BIO **bcont) { return (PKCS7 *)SMIME_read_ASN1(bio, bcont, ASN1_ITEM_rptr(PKCS7)); } /* Callback for int_smime_write_ASN1 */ static int pk7_output_data(BIO *out, BIO *data, ASN1_VALUE *val, int flags, const ASN1_ITEM *it) { PKCS7 *p7 = (PKCS7 *)val; BIO *tmpbio, *p7bio; if (!(flags & SMIME_DETACHED)) { SMIME_crlf_copy(data, out, flags); return 1; } /* Let PKCS7 code prepend any needed BIOs */ p7bio = PKCS7_dataInit(p7, out); if (!p7bio) return 0; /* Copy data across, passing through filter BIOs for processing */ SMIME_crlf_copy(data, p7bio, flags); /* Finalize structure */ if (PKCS7_dataFinal(p7, p7bio) <= 0) goto err; err: /* Now remove any digests prepended to the BIO */ while (p7bio != out) { tmpbio = BIO_pop(p7bio); BIO_free(p7bio); p7bio = tmpbio; } return 1; } int SMIME_write_PKCS7(BIO *bio, PKCS7 *p7, BIO *data, int flags) { STACK_OF(X509_ALGOR) *mdalgs; int ctype_nid = OBJ_obj2nid(p7->type); if (ctype_nid == NID_pkcs7_signed) mdalgs = p7->d.sign->md_algs; else mdalgs = NULL; return int_smime_write_ASN1(bio, (ASN1_VALUE *)p7, data, flags, ctype_nid, NID_undef, mdalgs, pk7_output_data, ASN1_ITEM_rptr(PKCS7)); }
gpl-2.0
rocklee104/linux-2.6.28-tiny6410
drivers/uwb/uwb-debug.c
26
8860
/* * Ultra Wide Band * Debug support * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: doc */ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/uwb/debug-cmd.h> #define D_LOCAL 0 #include <linux/uwb/debug.h> #include "uwb-internal.h" void dump_bytes(struct device *dev, const void *_buf, size_t rsize) { const char *buf = _buf; char line[32]; size_t offset = 0; int cnt, cnt2; for (cnt = 0; cnt < rsize; cnt += 8) { size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { offset += scnprintf(line + offset, sizeof(line) - offset, "%02x ", buf[cnt + cnt2] & 0xff); } if (dev) dev_info(dev, "%s\n", line); else printk(KERN_INFO "%s\n", line); } } EXPORT_SYMBOL_GPL(dump_bytes); /* * Debug interface * * Per radio controller debugfs files (in uwb/uwbN/): * * command: Flexible command interface (see <linux/uwb/debug-cmd.h>). * * reservations: information on reservations. * * accept: Set to true (Y or 1) to accept reservation requests from * peers. * * drp_avail: DRP availability information. 
*/ struct uwb_dbg { struct uwb_pal pal; u32 accept; struct list_head rsvs; struct dentry *root_d; struct dentry *command_f; struct dentry *reservations_f; struct dentry *accept_f; struct dentry *drp_avail_f; }; static struct dentry *root_dir; static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; struct device *dev = &rc->uwb_dev.dev; struct uwb_dev_addr devaddr; char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); if (rsv->target.type == UWB_RSV_TARGET_DEV) devaddr = rsv->target.dev->dev_addr; else devaddr = rsv->target.devaddr; uwb_dev_addr_print(target, sizeof(target), &devaddr); dev_dbg(dev, "debug: rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); } static int cmd_rsv_establish(struct uwb_rc *rc, struct uwb_dbg_cmd_rsv_establish *cmd) { struct uwb_mac_addr macaddr; struct uwb_rsv *rsv; struct uwb_dev *target; int ret; memcpy(&macaddr, cmd->target, sizeof(macaddr)); target = uwb_dev_get_by_macaddr(rc, &macaddr); if (target == NULL) return -ENODEV; rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); if (rsv == NULL) { uwb_dev_put(target); return -ENOMEM; } rsv->owner = &rc->uwb_dev; rsv->target.type = UWB_RSV_TARGET_DEV; rsv->target.dev = target; rsv->type = cmd->type; rsv->max_mas = cmd->max_mas; rsv->min_mas = cmd->min_mas; rsv->sparsity = cmd->sparsity; ret = uwb_rsv_establish(rsv); if (ret) uwb_rsv_destroy(rsv); else list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); return ret; } static int cmd_rsv_terminate(struct uwb_rc *rc, struct uwb_dbg_cmd_rsv_terminate *cmd) { struct uwb_rsv *rsv, *found = NULL; int i = 0; list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { if (i == cmd->index) { found = rsv; break; } } if (!found) return -EINVAL; list_del(&found->pal_node); uwb_rsv_terminate(found); return 0; } static int command_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t command_write(struct 
file *file, const char __user *buf, size_t len, loff_t *off) { struct uwb_rc *rc = file->private_data; struct uwb_dbg_cmd cmd; int ret; if (len != sizeof(struct uwb_dbg_cmd)) return -EINVAL; if (copy_from_user(&cmd, buf, len) != 0) return -EFAULT; switch (cmd.type) { case UWB_DBG_CMD_RSV_ESTABLISH: ret = cmd_rsv_establish(rc, &cmd.rsv_establish); break; case UWB_DBG_CMD_RSV_TERMINATE: ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); break; default: return -EINVAL; } return ret < 0 ? ret : len; } static struct file_operations command_fops = { .open = command_open, .write = command_write, .read = NULL, .llseek = no_llseek, .owner = THIS_MODULE, }; static int reservations_print(struct seq_file *s, void *p) { struct uwb_rc *rc = s->private; struct uwb_rsv *rsv; mutex_lock(&rc->rsvs_mutex); list_for_each_entry(rsv, &rc->reservations, rc_node) { struct uwb_dev_addr devaddr; char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; bool is_owner; char buf[72]; uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); if (rsv->target.type == UWB_RSV_TARGET_DEV) { devaddr = rsv->target.dev->dev_addr; is_owner = &rc->uwb_dev == rsv->owner; } else { devaddr = rsv->target.devaddr; is_owner = true; } uwb_dev_addr_print(target, sizeof(target), &devaddr); seq_printf(s, "%c %s -> %s: %s\n", is_owner ? 
'O' : 'T', owner, target, uwb_rsv_state_str(rsv->state)); seq_printf(s, " stream: %d type: %s\n", rsv->stream, uwb_rsv_type_str(rsv->type)); bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); seq_printf(s, " %s\n", buf); } mutex_unlock(&rc->rsvs_mutex); return 0; } static int reservations_open(struct inode *inode, struct file *file) { return single_open(file, reservations_print, inode->i_private); } static struct file_operations reservations_fops = { .open = reservations_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static int drp_avail_print(struct seq_file *s, void *p) { struct uwb_rc *rc = s->private; char buf[72]; bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS); seq_printf(s, "global: %s\n", buf); bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS); seq_printf(s, "local: %s\n", buf); bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS); seq_printf(s, "pending: %s\n", buf); return 0; } static int drp_avail_open(struct inode *inode, struct file *file) { return single_open(file, drp_avail_print, inode->i_private); } static struct file_operations drp_avail_fops = { .open = drp_avail_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) { struct uwb_rc *rc = rsv->rc; if (rc->dbg->accept) uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); } /** * uwb_dbg_add_rc - add a debug interface for a radio controller * @rc: the radio controller */ void uwb_dbg_add_rc(struct uwb_rc *rc) { rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL); if (rc->dbg == NULL) return; INIT_LIST_HEAD(&rc->dbg->rsvs); uwb_pal_init(&rc->dbg->pal); rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; uwb_pal_register(rc, &rc->dbg->pal); if (root_dir) { rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), root_dir); rc->dbg->command_f = debugfs_create_file("command", 0200, rc->dbg->root_d, rc, 
&command_fops); rc->dbg->reservations_f = debugfs_create_file("reservations", 0444, rc->dbg->root_d, rc, &reservations_fops); rc->dbg->accept_f = debugfs_create_bool("accept", 0644, rc->dbg->root_d, &rc->dbg->accept); rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444, rc->dbg->root_d, rc, &drp_avail_fops); } } /** * uwb_dbg_add_rc - remove a radio controller's debug interface * @rc: the radio controller */ void uwb_dbg_del_rc(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; if (rc->dbg == NULL) return; list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { uwb_rsv_destroy(rsv); } uwb_pal_unregister(rc, &rc->dbg->pal); if (root_dir) { debugfs_remove(rc->dbg->drp_avail_f); debugfs_remove(rc->dbg->accept_f); debugfs_remove(rc->dbg->reservations_f); debugfs_remove(rc->dbg->command_f); debugfs_remove(rc->dbg->root_d); } } /** * uwb_dbg_exit - initialize the debug interface sub-module */ void uwb_dbg_init(void) { root_dir = debugfs_create_dir("uwb", NULL); } /** * uwb_dbg_exit - clean-up the debug interface sub-module */ void uwb_dbg_exit(void) { debugfs_remove(root_dir); }
gpl-2.0
FOSSEE/FOSSEE-netbook-kernel-source
drivers/net/wireless/rtl8188C_8192C/os_dep/linux/usb_ops_linux.c
26
15750
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * *******************************************************************************/ #define _USB_OPS_LINUX_C_ #include <drv_types.h> #include <usb_ops_linux.h> #include <rtw_sreset.h> #ifdef CONFIG_USB_SUPPORT_ASYNC_VDN_REQ static void _usbctrl_vendorreq_async_callback(struct urb *urb, struct pt_regs *regs) { if (urb) { if (urb->context) { kfree(urb->context); } usb_free_urb(urb); } } static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype) { int rc; unsigned int pipe; u8 reqtype; struct usb_ctrlrequest *dr; struct urb *urb; struct rtl819x_async_write_data { u8 data[VENDOR_CMD_MAX_DATA_LEN]; struct usb_ctrlrequest dr; } *buf; if (requesttype == VENDOR_READ) { pipe = usb_rcvctrlpipe(udev, 0);//read_in reqtype = REALTEK_USB_VENQT_READ; } else { pipe = usb_sndctrlpipe(udev, 0);//write_out reqtype = REALTEK_USB_VENQT_WRITE; } buf = (struct rtl819x_async_write_data *)rtw_zmalloc(sizeof(*buf)); if (!buf) { rc = -ENOMEM; goto exit; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { rtw_mfree((u8*)buf, sizeof(*buf)); rc = -ENOMEM; goto exit; } dr = &buf->dr; dr->bRequestType = reqtype; dr->bRequest = request; dr->wValue 
= cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); _rtw_memcpy(buf, pdata, len); usb_fill_control_urb(urb, udev, pipe, (unsigned char *)dr, buf, len, _usbctrl_vendorreq_async_callback, buf); rc = usb_submit_urb(urb, GFP_ATOMIC); if (rc < 0) { rtw_mfree((u8*)buf, sizeof(*buf)); usb_free_urb(urb); } exit: return rc; } int usb_write_async(struct usb_device *udev, u32 addr, void *pdata, u16 len) { u8 request; u8 requesttype; u16 wvalue; u16 index; int ret; requesttype = VENDOR_WRITE;//write_out request = REALTEK_USB_VENQT_CMD_REQ; index = REALTEK_USB_VENQT_CMD_IDX;//n/a wvalue = (u16)(addr&0x0000ffff); ret = _usbctrl_vendorreq_async_write(udev, request, wvalue, index, pdata, len, requesttype); return ret; } int usb_async_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val) { u8 data; int ret; struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)pintfhdl->pintf_dev; struct usb_device *udev=pdvobjpriv->pusbdev; _func_enter_; data = val; ret = usb_write_async(udev, addr, &data, 1); _func_exit_; return ret; } int usb_async_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val) { u16 data; int ret; struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)pintfhdl->pintf_dev; struct usb_device *udev=pdvobjpriv->pusbdev; _func_enter_; data = val; ret = usb_write_async(udev, addr, &data, 2); _func_exit_; return ret; } int usb_async_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val) { u32 data; int ret; struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)pintfhdl->pintf_dev; struct usb_device *udev=pdvobjpriv->pusbdev; _func_enter_; data = val; ret = usb_write_async(udev, addr, &data, 4); _func_exit_; return ret; } #endif /* CONFIG_USB_SUPPORT_ASYNC_VDN_REQ */ unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr) { unsigned int pipe=0; int ep_num=0; _adapter *padapter = pdvobj->if1; struct usb_device *pusbd = pdvobj->pusbdev; HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); if (addr == RECV_BULK_IN_ADDR) { 
pipe=usb_rcvbulkpipe(pusbd, pHalData->RtBulkInPipe); } else if (addr == RECV_INT_IN_ADDR) { pipe=usb_rcvbulkpipe(pusbd, pHalData->RtIntInPipe); } else if (addr < HW_QUEUE_ENTRY) { ep_num = pHalData->Queue2EPNum[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); } return pipe; } struct zero_bulkout_context{ void *pbuf; void *purb; void *pirp; void *padapter; }; static void usb_bulkout_zero_complete(struct urb *purb, struct pt_regs *regs) { struct zero_bulkout_context *pcontext = (struct zero_bulkout_context *)purb->context; //DBG_8192C("+usb_bulkout_zero_complete\n"); if(pcontext) { if(pcontext->pbuf) { rtw_mfree(pcontext->pbuf, sizeof(int)); } if(pcontext->purb && (pcontext->purb==purb)) { usb_free_urb(pcontext->purb); } rtw_mfree((u8*)pcontext, sizeof(struct zero_bulkout_context)); } } static u32 usb_bulkout_zero(struct intf_hdl *pintfhdl, u32 addr) { int pipe, status, len; u32 ret; unsigned char *pbuf; struct zero_bulkout_context *pcontext; PURB purb = NULL; _adapter *padapter = (_adapter *)pintfhdl->padapter; struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter); struct usb_device *pusbd = pdvobj->pusbdev; //DBG_871X("%s\n", __func__); if((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||(padapter->pwrctrlpriv.pnp_bstop_trx)) { return _FAIL; } pcontext = (struct zero_bulkout_context *)rtw_zmalloc(sizeof(struct zero_bulkout_context)); pbuf = (unsigned char *)rtw_zmalloc(sizeof(int)); purb = usb_alloc_urb(0, GFP_ATOMIC); len = 0; pcontext->pbuf = pbuf; pcontext->purb = purb; pcontext->pirp = NULL; pcontext->padapter = padapter; //translate DMA FIFO addr to pipehandle //pipe = ffaddr2pipehdl(pdvobj, addr); usb_fill_bulk_urb(purb, pusbd, pipe, pbuf, len, usb_bulkout_zero_complete, pcontext);//context is pcontext status = usb_submit_urb(purb, GFP_ATOMIC); if (!status) { ret= _SUCCESS; } else { ret= _FAIL; } return _SUCCESS; } void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem) { } void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, 
u32 cnt, u8 *wmem) { } void usb_read_port_cancel(struct intf_hdl *pintfhdl) { int i; struct recv_buf *precvbuf; _adapter *padapter = pintfhdl->padapter; precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; DBG_871X("%s\n", __func__); padapter->bReadPortCancel = _TRUE; for (i=0; i < NR_RECVBUFF ; i++) { precvbuf->reuse = _TRUE; if (precvbuf->purb) { //DBG_8192C("usb_read_port_cancel : usb_kill_urb \n"); usb_kill_urb(precvbuf->purb); } precvbuf++; } #ifdef CONFIG_USB_INTERRUPT_IN_PIPE usb_kill_urb(padapter->recvpriv.int_in_urb); #endif } static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs) { _irqL irqL; int i; struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context; //struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data; //_adapter *padapter = pxmitframe->padapter; _adapter *padapter = pxmitbuf->padapter; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; //struct pkt_attrib *pattrib = &pxmitframe->attrib; _func_enter_; switch(pxmitbuf->flags) { case VO_QUEUE_INX: pxmitpriv->voq_cnt--; break; case VI_QUEUE_INX: pxmitpriv->viq_cnt--; break; case BE_QUEUE_INX: pxmitpriv->beq_cnt--; break; case BK_QUEUE_INX: pxmitpriv->bkq_cnt--; break; case HIGH_QUEUE_INX: #ifdef CONFIG_AP_MODE rtw_chk_hi_queue_cmd(padapter); #endif break; default: break; } /* _enter_critical(&pxmitpriv->lock, &irqL); pxmitpriv->txirp_cnt--; switch(pattrib->priority) { case 1: case 2: pxmitpriv->bkq_cnt--; //DBG_8192C("pxmitpriv->bkq_cnt=%d\n", pxmitpriv->bkq_cnt); break; case 4: case 5: pxmitpriv->viq_cnt--; //DBG_8192C("pxmitpriv->viq_cnt=%d\n", pxmitpriv->viq_cnt); break; case 6: case 7: pxmitpriv->voq_cnt--; //DBG_8192C("pxmitpriv->voq_cnt=%d\n", pxmitpriv->voq_cnt); break; case 0: case 3: default: pxmitpriv->beq_cnt--; //DBG_8192C("pxmitpriv->beq_cnt=%d\n", pxmitpriv->beq_cnt); break; } _exit_critical(&pxmitpriv->lock, &irqL); if(pxmitpriv->txirp_cnt==0) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete: txirp_cnt== 0, set 
allrxreturnevt!\n")); _rtw_up_sema(&(pxmitpriv->tx_retevt)); } */ //rtw_free_xmitframe(pxmitpriv, pxmitframe); if(padapter->bSurpriseRemoved || padapter->bDriverStopped ||padapter->bWritePortCancel) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved)); DBG_8192C("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x) \n", __FUNCTION__,padapter->bDriverStopped, padapter->bSurpriseRemoved,padapter->bReadPortCancel,pxmitbuf->ext_tag); goto check_completion; } if (purb->status==0) { } else { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete : purb->status(%d) != 0 \n", purb->status)); DBG_871X("###=> urb_write_port_complete status(%d)\n",purb->status); if((purb->status==-EPIPE)||(purb->status==-EPROTO)) { //usb_clear_halt(pusbdev, purb->pipe); //msleep(10); sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL); } else if (purb->status == -EINPROGRESS) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete: EINPROGESS\n")); goto check_completion; } else if (purb->status == -ENOENT) { DBG_871X("%s: -ENOENT\n", __func__); goto check_completion; } else if (purb->status == -ECONNRESET) { DBG_871X("%s: -ECONNRESET\n", __func__); goto check_completion; } else if (purb->status == -ESHUTDOWN) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete: ESHUTDOWN\n")); padapter->bDriverStopped=_TRUE; RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete:bDriverStopped=TRUE\n")); goto check_completion; } else { padapter->bSurpriseRemoved=_TRUE; DBG_8192C("bSurpriseRemoved=TRUE\n"); //rtl8192cu_trigger_gpio_0(padapter); RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port_complete:bSurpriseRemoved=TRUE\n")); goto check_completion; } } #ifdef DBG_CONFIG_ERROR_DETECT { HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); 
pHalData->srestpriv.last_tx_complete_time = rtw_get_current_time(); } #endif check_completion: rtw_sctx_done_err(&pxmitbuf->sctx, purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS); rtw_free_xmitbuf(pxmitpriv, pxmitbuf); //if(rtw_txframes_pending(padapter)) { tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } _func_exit_; } u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem) { _irqL irqL; unsigned int pipe; int status; u32 ret = _FAIL, bwritezero = _FALSE; PURB purb = NULL; _adapter *padapter = (_adapter *)pintfhdl->padapter; struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem; struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data; struct usb_device *pusbd = pdvobj->pusbdev; struct pkt_attrib *pattrib = &pxmitframe->attrib; _func_enter_; RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("+usb_write_port\n")); if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||(padapter->pwrctrlpriv.pnp_bstop_trx)) { #ifdef DBG_TX DBG_871X(" DBG_TX %s:%d bDriverStopped%d, bSurpriseRemoved:%d, pnp_bstop_trx:%d\n",__FUNCTION__, __LINE__ ,padapter->bDriverStopped, padapter->bSurpriseRemoved, padapter->pwrctrlpriv.pnp_bstop_trx ); #endif RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port:( padapter->bDriverStopped ||padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n")); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY); goto exit; } _enter_critical(&pxmitpriv->lock, &irqL); switch(addr) { case VO_QUEUE_INX: pxmitpriv->voq_cnt++; pxmitbuf->flags = VO_QUEUE_INX; break; case VI_QUEUE_INX: pxmitpriv->viq_cnt++; pxmitbuf->flags = VI_QUEUE_INX; break; case BE_QUEUE_INX: pxmitpriv->beq_cnt++; pxmitbuf->flags = BE_QUEUE_INX; break; case BK_QUEUE_INX: pxmitpriv->bkq_cnt++; pxmitbuf->flags = BK_QUEUE_INX; break; case HIGH_QUEUE_INX: pxmitbuf->flags = HIGH_QUEUE_INX; break; default: 
pxmitbuf->flags = MGT_QUEUE_INX; break; } _exit_critical(&pxmitpriv->lock, &irqL); purb = pxmitbuf->pxmit_urb[0]; #if 0 if(pdvobj->ishighspeed) { if(cnt> 0 && cnt%512 == 0) { //DBG_8192C("ishighspeed, cnt=%d\n", cnt); bwritezero = _TRUE; } } else { if(cnt > 0 && cnt%64 == 0) { //DBG_8192C("cnt=%d\n", cnt); bwritezero = _TRUE; } } #endif //translate DMA FIFO addr to pipehandle pipe = ffaddr2pipehdl(pdvobj, addr); #ifdef CONFIG_REDUCE_USB_TX_INT if ( (pxmitpriv->free_xmitbuf_cnt%NR_XMITBUFF == 0) || (pxmitbuf->ext_tag == _TRUE) ) { purb->transfer_flags &= (~URB_NO_INTERRUPT); } else { purb->transfer_flags |= URB_NO_INTERRUPT; //DBG_8192C("URB_NO_INTERRUPT "); } #endif usb_fill_bulk_urb(purb, pusbd, pipe, pxmitframe->buf_addr, //= pxmitbuf->pbuf cnt, usb_write_port_complete, pxmitbuf);//context is pxmitbuf #ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX purb->transfer_dma = pxmitbuf->dma_transfer_addr; purb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; purb->transfer_flags |= URB_ZERO_PACKET; #endif // CONFIG_USE_USB_BUFFER_ALLOC_TX #if 0 if (bwritezero) { purb->transfer_flags |= URB_ZERO_PACKET; } #endif status = usb_submit_urb(purb, GFP_ATOMIC); if (!status) { #ifdef DBG_CONFIG_ERROR_DETECT { HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); pHalData->srestpriv.last_tx_time = rtw_get_current_time(); } #endif } else { rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR); DBG_871X("usb_write_port, status=%d\n", status); RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_write_port(): usb_submit_urb, status=%x\n", status)); switch (status) { case -ENODEV: padapter->bDriverStopped=_TRUE; break; default: break; } goto exit; } ret= _SUCCESS; // Commented by Albert 2009/10/13 // We add the URB_ZERO_PACKET flag to urb so that the host will send the zero packet automatically. 
/* if(bwritezero == _TRUE) { usb_bulkout_zero(pintfhdl, addr); } */ RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("-usb_write_port\n")); exit: if (ret != _SUCCESS) rtw_free_xmitbuf(pxmitpriv, pxmitbuf); _func_exit_; return ret; } void usb_write_port_cancel(struct intf_hdl *pintfhdl) { int i, j; _adapter *padapter = pintfhdl->padapter; struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf; DBG_871X("%s \n", __func__); padapter->bWritePortCancel = _TRUE; for (i=0; i<NR_XMITBUFF; i++) { for (j=0; j<8; j++) { if (pxmitbuf->pxmit_urb[j]) { usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } pxmitbuf++; } pxmitbuf = (struct xmit_buf*)padapter->xmitpriv.pxmit_extbuf; for (i = 0; i < NR_XMIT_EXTBUFF; i++) { for (j=0; j<8; j++) { if(pxmitbuf->pxmit_urb[j]) { usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } pxmitbuf++; } }
gpl-2.0
rockly703/linux-2.6.28-mini6410
sound/soc/codecs/tlv320aic26.c
26
14755
/* * Texas Instruments TLV320AIC26 low power audio CODEC * ALSA SoC CODEC driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/soc-of-simple.h> #include <sound/initval.h> #include "tlv320aic26.h" MODULE_DESCRIPTION("ASoC TLV320AIC26 codec driver"); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_LICENSE("GPL"); /* AIC26 driver private data */ struct aic26 { struct spi_device *spi; struct snd_soc_codec codec; u16 reg_cache[AIC26_NUM_REGS]; /* shadow registers */ int master; int datfm; int mclk; /* Keyclick parameters */ int keyclick_amplitude; int keyclick_freq; int keyclick_len; }; /* --------------------------------------------------------------------- * Register access routines */ static unsigned int aic26_reg_read(struct snd_soc_codec *codec, unsigned int reg) { struct aic26 *aic26 = codec->private_data; u16 *cache = codec->reg_cache; u16 cmd, value; u8 buffer[2]; int rc; if (reg >= AIC26_NUM_REGS) { WARN_ON_ONCE(1); return 0; } /* Do SPI transfer; first 16bits are command; remaining is * register contents */ cmd = AIC26_READ_COMMAND_WORD(reg); buffer[0] = (cmd >> 8) & 0xff; buffer[1] = cmd & 0xff; rc = spi_write_then_read(aic26->spi, buffer, 2, buffer, 2); if (rc) { dev_err(&aic26->spi->dev, "AIC26 reg read error\n"); return -EIO; } value = (buffer[0] << 8) | buffer[1]; /* Update the cache before returning with the value */ cache[reg] = value; return value; } static unsigned int aic26_reg_read_cache(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; if (reg >= AIC26_NUM_REGS) { WARN_ON_ONCE(1); return 0; } return cache[reg]; } static int aic26_reg_write(struct 
snd_soc_codec *codec, unsigned int reg, unsigned int value) { struct aic26 *aic26 = codec->private_data; u16 *cache = codec->reg_cache; u16 cmd; u8 buffer[4]; int rc; if (reg >= AIC26_NUM_REGS) { WARN_ON_ONCE(1); return -EINVAL; } /* Do SPI transfer; first 16bits are command; remaining is data * to write into register */ cmd = AIC26_WRITE_COMMAND_WORD(reg); buffer[0] = (cmd >> 8) & 0xff; buffer[1] = cmd & 0xff; buffer[2] = value >> 8; buffer[3] = value; rc = spi_write(aic26->spi, buffer, 4); if (rc) { dev_err(&aic26->spi->dev, "AIC26 reg read error\n"); return -EIO; } /* update cache before returning */ cache[reg] = value; return 0; } /* --------------------------------------------------------------------- * Digital Audio Interface Operations */ static int aic26_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_codec *codec = socdev->codec; struct aic26 *aic26 = codec->private_data; int fsref, divisor, wlen, pval, jval, dval, qval; u16 reg; dev_dbg(&aic26->spi->dev, "aic26_hw_params(substream=%p, params=%p)\n", substream, params); dev_dbg(&aic26->spi->dev, "rate=%i format=%i\n", params_rate(params), params_format(params)); switch (params_rate(params)) { case 8000: fsref = 48000; divisor = AIC26_DIV_6; break; case 11025: fsref = 44100; divisor = AIC26_DIV_4; break; case 12000: fsref = 48000; divisor = AIC26_DIV_4; break; case 16000: fsref = 48000; divisor = AIC26_DIV_3; break; case 22050: fsref = 44100; divisor = AIC26_DIV_2; break; case 24000: fsref = 48000; divisor = AIC26_DIV_2; break; case 32000: fsref = 48000; divisor = AIC26_DIV_1_5; break; case 44100: fsref = 44100; divisor = AIC26_DIV_1; break; case 48000: fsref = 48000; divisor = AIC26_DIV_1; break; default: dev_dbg(&aic26->spi->dev, "bad rate\n"); return -EINVAL; } /* select data word length */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: wlen 
= AIC26_WLEN_16; break; case SNDRV_PCM_FORMAT_S16_BE: wlen = AIC26_WLEN_16; break; case SNDRV_PCM_FORMAT_S24_BE: wlen = AIC26_WLEN_24; break; case SNDRV_PCM_FORMAT_S32_BE: wlen = AIC26_WLEN_32; break; default: dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL; } /* Configure PLL */ pval = 1; jval = (fsref == 44100) ? 7 : 8; dval = (fsref == 44100) ? 5264 : 1920; qval = 0; reg = 0x8000 | qval << 11 | pval << 8 | jval << 2; aic26_reg_write(codec, AIC26_REG_PLL_PROG1, reg); reg = dval << 2; aic26_reg_write(codec, AIC26_REG_PLL_PROG2, reg); /* Audio Control 3 (master mode, fsref rate) */ reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL3); reg &= ~0xf800; if (aic26->master) reg |= 0x0800; if (fsref == 48000) reg |= 0x2000; aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg); /* Audio Control 1 (FSref divisor) */ reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL1); reg &= ~0x0fff; reg |= wlen | aic26->datfm | (divisor << 3) | divisor; aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL1, reg); return 0; } /** * aic26_mute - Mute control to reduce noise when changing audio format */ static int aic26_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; struct aic26 *aic26 = codec->private_data; u16 reg = aic26_reg_read_cache(codec, AIC26_REG_DAC_GAIN); dev_dbg(&aic26->spi->dev, "aic26_mute(dai=%p, mute=%i)\n", dai, mute); if (mute) reg |= 0x8080; else reg &= ~0x8080; aic26_reg_write(codec, AIC26_REG_DAC_GAIN, reg); return 0; } static int aic26_set_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct aic26 *aic26 = codec->private_data; dev_dbg(&aic26->spi->dev, "aic26_set_sysclk(dai=%p, clk_id==%i," " freq=%i, dir=%i)\n", codec_dai, clk_id, freq, dir); /* MCLK needs to fall between 2MHz and 50 MHz */ if ((freq < 2000000) || (freq > 50000000)) return -EINVAL; aic26->mclk = freq; return 0; } static int aic26_set_fmt(struct snd_soc_dai *codec_dai, unsigned 
int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct aic26 *aic26 = codec->private_data; dev_dbg(&aic26->spi->dev, "aic26_set_fmt(dai=%p, fmt==%i)\n", codec_dai, fmt); /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: aic26->master = 1; break; case SND_SOC_DAIFMT_CBS_CFS: aic26->master = 0; break; default: dev_dbg(&aic26->spi->dev, "bad master\n"); return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: aic26->datfm = AIC26_DATFM_I2S; break; case SND_SOC_DAIFMT_DSP_A: aic26->datfm = AIC26_DATFM_DSP; break; case SND_SOC_DAIFMT_RIGHT_J: aic26->datfm = AIC26_DATFM_RIGHTJ; break; case SND_SOC_DAIFMT_LEFT_J: aic26->datfm = AIC26_DATFM_LEFTJ; break; default: dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL; } return 0; } /* --------------------------------------------------------------------- * Digital Audio Interface Definition */ #define AIC26_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000) #define AIC26_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE |\ SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE) struct snd_soc_dai aic26_dai = { .name = "tlv320aic26", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = AIC26_RATES, .formats = AIC26_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 2, .channels_max = 2, .rates = AIC26_RATES, .formats = AIC26_FORMATS, }, .ops = { .hw_params = aic26_hw_params, }, .dai_ops = { .digital_mute = aic26_mute, .set_sysclk = aic26_set_sysclk, .set_fmt = aic26_set_fmt, }, }; EXPORT_SYMBOL_GPL(aic26_dai); /* --------------------------------------------------------------------- * ALSA controls */ static const char *aic26_capture_src_text[] = {"Mic", "Aux"}; static const struct soc_enum aic26_capture_src_enum = 
SOC_ENUM_SINGLE(AIC26_REG_AUDIO_CTRL1, 12, 2, aic26_capture_src_text); static const struct snd_kcontrol_new aic26_snd_controls[] = { /* Output */ SOC_DOUBLE("PCM Playback Volume", AIC26_REG_DAC_GAIN, 8, 0, 0x7f, 1), SOC_DOUBLE("PCM Playback Switch", AIC26_REG_DAC_GAIN, 15, 7, 1, 1), SOC_SINGLE("PCM Capture Volume", AIC26_REG_ADC_GAIN, 8, 0x7f, 0), SOC_SINGLE("PCM Capture Mute", AIC26_REG_ADC_GAIN, 15, 1, 1), SOC_SINGLE("Keyclick activate", AIC26_REG_AUDIO_CTRL2, 15, 0x1, 0), SOC_SINGLE("Keyclick amplitude", AIC26_REG_AUDIO_CTRL2, 12, 0x7, 0), SOC_SINGLE("Keyclick frequency", AIC26_REG_AUDIO_CTRL2, 8, 0x7, 0), SOC_SINGLE("Keyclick period", AIC26_REG_AUDIO_CTRL2, 4, 0xf, 0), SOC_ENUM("Capture Source", aic26_capture_src_enum), }; /* --------------------------------------------------------------------- * SoC CODEC portion of driver: probe and release routines */ static int aic26_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec; struct snd_kcontrol *kcontrol; struct aic26 *aic26; int i, ret, err; dev_info(&pdev->dev, "Probing AIC26 SoC CODEC driver\n"); dev_dbg(&pdev->dev, "socdev=%p\n", socdev); dev_dbg(&pdev->dev, "codec_data=%p\n", socdev->codec_data); /* Fetch the relevant aic26 private data here (it's already been * stored in the .codec pointer) */ aic26 = socdev->codec_data; if (aic26 == NULL) { dev_err(&pdev->dev, "aic26: missing codec pointer\n"); return -ENODEV; } codec = &aic26->codec; socdev->codec = codec; dev_dbg(&pdev->dev, "Registering PCMs, dev=%p, socdev->dev=%p\n", &pdev->dev, socdev->dev); /* register pcms */ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { dev_err(&pdev->dev, "aic26: failed to create pcms\n"); return -ENODEV; } /* register controls */ dev_dbg(&pdev->dev, "Registering controls\n"); for (i = 0; i < ARRAY_SIZE(aic26_snd_controls); i++) { kcontrol = snd_soc_cnew(&aic26_snd_controls[i], codec, NULL); err = 
snd_ctl_add(codec->card, kcontrol); WARN_ON(err < 0); } /* CODEC is setup, we can register the card now */ dev_dbg(&pdev->dev, "Registering card\n"); ret = snd_soc_register_card(socdev); if (ret < 0) { dev_err(&pdev->dev, "aic26: failed to register card\n"); goto card_err; } return 0; card_err: snd_soc_free_pcms(socdev); return ret; } static int aic26_remove(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); snd_soc_free_pcms(socdev); return 0; } struct snd_soc_codec_device aic26_soc_codec_dev = { .probe = aic26_probe, .remove = aic26_remove, }; EXPORT_SYMBOL_GPL(aic26_soc_codec_dev); /* --------------------------------------------------------------------- * SPI device portion of driver: sysfs files for debugging */ static ssize_t aic26_keyclick_show(struct device *dev, struct device_attribute *attr, char *buf) { struct aic26 *aic26 = dev_get_drvdata(dev); int val, amp, freq, len; val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2); amp = (val >> 12) & 0x7; freq = (125 << ((val >> 8) & 0x7)) >> 1; len = 2 * (1 + ((val >> 4) & 0xf)); return sprintf(buf, "amp=%x freq=%iHz len=%iclks\n", amp, freq, len); } /* Any write to the keyclick attribute will trigger the keyclick event */ static ssize_t aic26_keyclick_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aic26 *aic26 = dev_get_drvdata(dev); int val; val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2); val |= 0x8000; aic26_reg_write(&aic26->codec, AIC26_REG_AUDIO_CTRL2, val); return count; } static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set); /* --------------------------------------------------------------------- * SPI device portion of driver: probe and release routines and SPI * driver registration. 
*/ static int aic26_spi_probe(struct spi_device *spi) { struct aic26 *aic26; int rc, i, reg; dev_dbg(&spi->dev, "probing tlv320aic26 spi device\n"); /* Allocate driver data */ aic26 = kzalloc(sizeof *aic26, GFP_KERNEL); if (!aic26) return -ENOMEM; /* Initialize the driver data */ aic26->spi = spi; dev_set_drvdata(&spi->dev, aic26); /* Setup what we can in the codec structure so that the register * access functions will work as expected. More will be filled * out when it is probed by the SoC CODEC part of this driver */ aic26->codec.private_data = aic26; aic26->codec.name = "aic26"; aic26->codec.owner = THIS_MODULE; aic26->codec.dai = &aic26_dai; aic26->codec.num_dai = 1; aic26->codec.read = aic26_reg_read; aic26->codec.write = aic26_reg_write; aic26->master = 1; mutex_init(&aic26->codec.mutex); INIT_LIST_HEAD(&aic26->codec.dapm_widgets); INIT_LIST_HEAD(&aic26->codec.dapm_paths); aic26->codec.reg_cache_size = AIC26_NUM_REGS; aic26->codec.reg_cache = aic26->reg_cache; /* Reset the codec to power on defaults */ aic26_reg_write(&aic26->codec, AIC26_REG_RESET, 0xBB00); /* Power up CODEC */ aic26_reg_write(&aic26->codec, AIC26_REG_POWER_CTRL, 0); /* Audio Control 3 (master mode, fsref rate) */ reg = aic26_reg_read(&aic26->codec, AIC26_REG_AUDIO_CTRL3); reg &= ~0xf800; reg |= 0x0800; /* set master mode */ aic26_reg_write(&aic26->codec, AIC26_REG_AUDIO_CTRL3, reg); /* Fill register cache */ for (i = 0; i < ARRAY_SIZE(aic26->reg_cache); i++) aic26_reg_read(&aic26->codec, i); /* Register the sysfs files for debugging */ /* Create SysFS files */ rc = device_create_file(&spi->dev, &dev_attr_keyclick); if (rc) dev_info(&spi->dev, "error creating sysfs files\n"); #if defined(CONFIG_SND_SOC_OF_SIMPLE) /* Tell the of_soc helper about this codec */ of_snd_soc_register_codec(&aic26_soc_codec_dev, aic26, &aic26_dai, spi->dev.archdata.of_node); #endif dev_dbg(&spi->dev, "SPI device initialized\n"); return 0; } static int aic26_spi_remove(struct spi_device *spi) { struct aic26 *aic26 = 
dev_get_drvdata(&spi->dev); kfree(aic26); return 0; } static struct spi_driver aic26_spi = { .driver = { .name = "tlv320aic26", .owner = THIS_MODULE, }, .probe = aic26_spi_probe, .remove = aic26_spi_remove, }; static int __init aic26_init(void) { return spi_register_driver(&aic26_spi); } module_init(aic26_init); static void __exit aic26_exit(void) { spi_unregister_driver(&aic26_spi); } module_exit(aic26_exit);
gpl-2.0
chewcode/obs-studio
plugins/text-freetype2/text-functionality.c
26
12417
/****************************************************************************** Copyright (C) 2014 by Nibbles This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ #include <obs-module.h> #include <util/platform.h> #include <ft2build.h> #include FT_FREETYPE_H #include <sys/stat.h> #include "text-freetype2.h" #include "obs-convenience.h" float offsets[16] = { -2.0f, 0.0f, 0.0f, -2.0f, 2.0f, 0.0f, 2.0f, 0.0f, 0.0f, 2.0f, 0.0f, 2.0f, -2.0f, 0.0f, -2.0f, 0.0f }; extern uint32_t texbuf_w, texbuf_h; void draw_outlines(struct ft2_source *srcdata) { // Horrible (hopefully temporary) solution for outlines. uint32_t *tmp; struct gs_vb_data *vdata = gs_vertexbuffer_get_data(srcdata->vbuf); if (!srcdata->text) return; tmp = vdata->colors; vdata->colors = srcdata->colorbuf; gs_matrix_push(); for (int32_t i = 0; i < 8; i++) { gs_matrix_translate3f(offsets[i * 2], offsets[(i * 2) + 1], 0.0f); draw_uv_vbuffer(srcdata->vbuf, srcdata->tex, srcdata->draw_effect, (uint32_t)wcslen(srcdata->text) * 6); } gs_matrix_identity(); gs_matrix_pop(); vdata->colors = tmp; } void draw_drop_shadow(struct ft2_source *srcdata) { // Horrible (hopefully temporary) solution for drop shadow. 
uint32_t *tmp; struct gs_vb_data *vdata = gs_vertexbuffer_get_data(srcdata->vbuf); if (!srcdata->text) return; tmp = vdata->colors; vdata->colors = srcdata->colorbuf; gs_matrix_push(); gs_matrix_translate3f(4.0f, 4.0f, 0.0f); draw_uv_vbuffer(srcdata->vbuf, srcdata->tex, srcdata->draw_effect, (uint32_t)wcslen(srcdata->text) * 6); gs_matrix_identity(); gs_matrix_pop(); vdata->colors = tmp; } void set_up_vertex_buffer(struct ft2_source *srcdata) { FT_UInt glyph_index = 0; uint32_t x = 0, space_pos = 0, word_width = 0; size_t len; if (!srcdata->text) return; if (srcdata->custom_width >= 100) srcdata->cx = srcdata->custom_width; else srcdata->cx = get_ft2_text_width(srcdata->text, srcdata); srcdata->cy = srcdata->max_h; obs_enter_graphics(); if (srcdata->vbuf != NULL) { gs_vertbuffer_t *tmpvbuf = srcdata->vbuf; srcdata->vbuf = NULL; gs_vertexbuffer_destroy(tmpvbuf); } srcdata->vbuf = create_uv_vbuffer((uint32_t)wcslen(srcdata->text) * 6, true); if (srcdata->custom_width <= 100) goto skip_word_wrap; if (!srcdata->word_wrap) goto skip_word_wrap; len = wcslen(srcdata->text); for (uint32_t i = 0; i <= len; i++) { if (i == wcslen(srcdata->text)) goto eos_check; if (srcdata->text[i] != L' ' && srcdata->text[i] != L'\n') goto next_char; eos_check:; if (x + word_width > srcdata->custom_width) { if (space_pos != 0) srcdata->text[space_pos] = L'\n'; x = 0; } if (i == wcslen(srcdata->text)) goto eos_skip; x += word_width; word_width = 0; if (srcdata->text[i] == L'\n') x = 0; if (srcdata->text[i] == L' ') space_pos = i; next_char:; glyph_index = FT_Get_Char_Index(srcdata->font_face, srcdata->text[i]); word_width += src_glyph->xadv; eos_skip:; } skip_word_wrap:; fill_vertex_buffer(srcdata); obs_leave_graphics(); } void fill_vertex_buffer(struct ft2_source *srcdata) { struct gs_vb_data *vdata = gs_vertexbuffer_get_data(srcdata->vbuf); if (vdata == NULL || !srcdata->text) return; struct vec2 *tvarray = (struct vec2 *)vdata->tvarray[0].array; uint32_t *col = (uint32_t *)vdata->colors; 
FT_UInt glyph_index = 0; uint32_t dx = 0, dy = srcdata->max_h, max_y = dy; uint32_t cur_glyph = 0; size_t len = wcslen(srcdata->text); if (srcdata->colorbuf != NULL) { bfree(srcdata->colorbuf); srcdata->colorbuf = NULL; } srcdata->colorbuf = bzalloc(sizeof(uint32_t)*wcslen(srcdata->text) * 6); for (size_t i = 0; i < len * 6; i++) { srcdata->colorbuf[i] = 0xFF000000; } for (size_t i = 0; i < len; i++) { add_linebreak:; if (srcdata->text[i] != L'\n') goto draw_glyph; dx = 0; i++; dy += srcdata->max_h + 4; if (i == wcslen(srcdata->text)) goto skip_glyph; if (srcdata->text[i] == L'\n') goto add_linebreak; draw_glyph:; // Skip filthy dual byte Windows line breaks if (srcdata->text[i] == L'\r') goto skip_glyph; glyph_index = FT_Get_Char_Index(srcdata->font_face, srcdata->text[i]); if (src_glyph == NULL) goto skip_glyph; if (srcdata->custom_width < 100) goto skip_custom_width; if (dx + src_glyph->xadv > srcdata->custom_width) { dx = 0; dy += srcdata->max_h + 4; } skip_custom_width:; set_v3_rect(vdata->points + (cur_glyph * 6), (float)dx + (float)src_glyph->xoff, (float)dy - (float)src_glyph->yoff, (float)src_glyph->w, (float)src_glyph->h); set_v2_uv(tvarray + (cur_glyph * 6), src_glyph->u, src_glyph->v, src_glyph->u2, src_glyph->v2); set_rect_colors2(col + (cur_glyph * 6), srcdata->color[0], srcdata->color[1]); dx += src_glyph->xadv; if (dy - (float)src_glyph->yoff + src_glyph->h > max_y) max_y = dy - src_glyph->yoff + src_glyph->h; cur_glyph++; skip_glyph:; } srcdata->cy = max_y; } void cache_standard_glyphs(struct ft2_source *srcdata) { for (uint32_t i = 0; i < num_cache_slots; i++) { if (srcdata->cacheglyphs[i] != NULL) { bfree(srcdata->cacheglyphs[i]); srcdata->cacheglyphs[i] = NULL; } } srcdata->texbuf_x = 0; srcdata->texbuf_y = 0; cache_glyphs(srcdata, L"abcdefghijklmnopqrstuvwxyz" \ L"ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890" \ L"!@#$%^&*()-_=+,<.>/?\\|[]{}`~ \'\"\0"); } #define glyph_pos x + (y*slot->bitmap.pitch) #define buf_pos (dx + x) + ((dy + y) * texbuf_w) void 
cache_glyphs(struct ft2_source *srcdata, wchar_t *cache_glyphs) { FT_GlyphSlot slot; FT_UInt glyph_index = 0; if (!srcdata->font_face || !cache_glyphs) return; slot = srcdata->font_face->glyph; uint32_t dx = srcdata->texbuf_x, dy = srcdata->texbuf_y; uint8_t alpha; int32_t cached_glyphs = 0; size_t len = wcslen(cache_glyphs); for (size_t i = 0; i < len; i++) { glyph_index = FT_Get_Char_Index(srcdata->font_face, cache_glyphs[i]); if (src_glyph != NULL) goto skip_glyph; FT_Load_Glyph(srcdata->font_face, glyph_index, FT_LOAD_DEFAULT); FT_Render_Glyph(slot, FT_RENDER_MODE_NORMAL); uint32_t g_w = slot->bitmap.width; uint32_t g_h = slot->bitmap.rows; if (srcdata->max_h < g_h) srcdata->max_h = g_h; if (dx + g_w >= texbuf_w) { dx = 0; dy += srcdata->max_h + 1; } src_glyph = bzalloc(sizeof(struct glyph_info)); src_glyph->u = (float)dx / (float)texbuf_w; src_glyph->u2 = (float)(dx + g_w) / (float)texbuf_w; src_glyph->v = (float)dy / (float)texbuf_h; src_glyph->v2 = (float)(dy + g_h) / (float)texbuf_h; src_glyph->w = g_w; src_glyph->h = g_h; src_glyph->yoff = slot->bitmap_top; src_glyph->xoff = slot->bitmap_left; src_glyph->xadv = slot->advance.x >> 6; for (uint32_t y = 0; y < g_h; y++) { for (uint32_t x = 0; x < g_w; x++) { alpha = slot->bitmap.buffer[glyph_pos]; srcdata->texbuf[buf_pos] = 0x00FFFFFF ^ ((uint32_t)alpha << 24); } } dx += (g_w + 1); if (dx >= texbuf_w) { dx = 0; dy += srcdata->max_h; } cached_glyphs++; skip_glyph:; } srcdata->texbuf_x = dx; srcdata->texbuf_y = dy; if (cached_glyphs > 0) { obs_enter_graphics(); if (srcdata->tex != NULL) { gs_texture_t *tmp_texture = NULL; tmp_texture = srcdata->tex; srcdata->tex = NULL; gs_texture_destroy(tmp_texture); } srcdata->tex = gs_texture_create(texbuf_w, texbuf_h, GS_RGBA, 1, (const uint8_t **)&srcdata->texbuf, 0); obs_leave_graphics(); } } time_t get_modified_timestamp(char *filename) { struct stat stats; // stat is apparently terrifying and horrible, but we only call it once // every second at most. 
stat(filename, &stats); return stats.st_mtime; } static void remove_cr(wchar_t* source) { int j = 0; for (int i = 0; source[i] != '\0'; ++i) { if (source[i] != L'\r') { source[j++] = source[i]; } } source[j] = '\0'; } void load_text_from_file(struct ft2_source *srcdata, const char *filename) { FILE *tmp_file = NULL; uint32_t filesize = 0; char *tmp_read = NULL; uint16_t header = 0; size_t bytes_read; tmp_file = fopen(filename, "rb"); if (tmp_file == NULL) { if (!srcdata->file_load_failed) { blog(LOG_WARNING, "Failed to open file %s", filename); srcdata->file_load_failed = true; } return; } fseek(tmp_file, 0, SEEK_END); filesize = (uint32_t)ftell(tmp_file); fseek(tmp_file, 0, SEEK_SET); bytes_read = fread(&header, 2, 1, tmp_file); if (bytes_read == 2 && header == 0xFEFF) { // File is already in UTF-16 format if (srcdata->text != NULL) { bfree(srcdata->text); srcdata->text = NULL; } srcdata->text = bzalloc(filesize); bytes_read = fread(srcdata->text, filesize - 2, 1, tmp_file); srcdata->m_timestamp = get_modified_timestamp(srcdata->text_file); bfree(tmp_read); fclose(tmp_file); return; } fseek(tmp_file, 0, SEEK_SET); srcdata->m_timestamp = get_modified_timestamp(srcdata->text_file); tmp_read = bzalloc(filesize + 1); bytes_read = fread(tmp_read, filesize, 1, tmp_file); fclose(tmp_file); if (srcdata->text != NULL) { bfree(srcdata->text); srcdata->text = NULL; } srcdata->text = bzalloc((strlen(tmp_read) + 1)*sizeof(wchar_t)); os_utf8_to_wcs(tmp_read, strlen(tmp_read), srcdata->text, (strlen(tmp_read) + 1)); remove_cr(srcdata->text); bfree(tmp_read); } void read_from_end(struct ft2_source *srcdata, const char *filename) { FILE *tmp_file = NULL; uint32_t filesize = 0, cur_pos = 0; char *tmp_read = NULL; uint16_t value = 0, line_breaks = 0; size_t bytes_read; char bvalue; bool utf16 = false; tmp_file = fopen(filename, "rb"); if (tmp_file == NULL) { if (!srcdata->file_load_failed) { blog(LOG_WARNING, "Failed to open file %s", filename); srcdata->file_load_failed = true; } 
return; } bytes_read = fread(&value, 2, 1, tmp_file); if (bytes_read == 2 && value == 0xFEFF) utf16 = true; fseek(tmp_file, 0, SEEK_END); filesize = (uint32_t)ftell(tmp_file); cur_pos = filesize; while (line_breaks <= 6 && cur_pos != 0) { if (!utf16) cur_pos--; else cur_pos -= 2; fseek(tmp_file, cur_pos, SEEK_SET); if (!utf16) { bytes_read = fread(&bvalue, 1, 1, tmp_file); if (bytes_read == 1 && bvalue == '\n') line_breaks++; } else { bytes_read = fread(&value, 2, 1, tmp_file); if (bytes_read == 2 && value == L'\n') line_breaks++; } } if (cur_pos != 0) cur_pos += (utf16) ? 2 : 1; fseek(tmp_file, cur_pos, SEEK_SET); if (utf16) { if (srcdata->text != NULL) { bfree(srcdata->text); srcdata->text = NULL; } srcdata->text = bzalloc(filesize - cur_pos); bytes_read = fread(srcdata->text, (filesize - cur_pos), 1, tmp_file); remove_cr(srcdata->text); srcdata->m_timestamp = get_modified_timestamp(srcdata->text_file); bfree(tmp_read); fclose(tmp_file); return; } tmp_read = bzalloc((filesize - cur_pos) + 1); bytes_read = fread(tmp_read, filesize - cur_pos, 1, tmp_file); fclose(tmp_file); if (srcdata->text != NULL) { bfree(srcdata->text); srcdata->text = NULL; } srcdata->text = bzalloc((strlen(tmp_read) + 1)*sizeof(wchar_t)); os_utf8_to_wcs(tmp_read, strlen(tmp_read), srcdata->text, (strlen(tmp_read) + 1)); remove_cr(srcdata->text); srcdata->m_timestamp = get_modified_timestamp(srcdata->text_file); bfree(tmp_read); } uint32_t get_ft2_text_width(wchar_t *text, struct ft2_source *srcdata) { FT_GlyphSlot slot = srcdata->font_face->glyph; FT_UInt glyph_index = 0; uint32_t w = 0, max_w = 0; size_t len; if (!text) return 0; len = wcslen(text); for (size_t i = 0; i < len; i++) { glyph_index = FT_Get_Char_Index(srcdata->font_face, text[i]); FT_Load_Glyph(srcdata->font_face, glyph_index, FT_LOAD_DEFAULT); if (text[i] == L'\n') w = 0; else { w += slot->advance.x >> 6; if (w > max_w) max_w = w; } } return max_w; }
gpl-2.0
friedrich420/HTC-ONE-M7-AEL-Kernel-5.0.2
arch/arm/mach-msm/board-8960-camera.c
282
19911
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/mach-types.h> #include <linux/gpio.h> #include <mach/board.h> #include <mach/msm_bus_board.h> #include <mach/gpiomux.h> #include "devices.h" #include "board-8960.h" #ifdef CONFIG_MSM_CAMERA #if (defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)) && \ defined(CONFIG_I2C) static struct i2c_board_info cam_expander_i2c_info[] = { { I2C_BOARD_INFO("sx1508q", 0x22), .platform_data = &msm8960_sx150x_data[SX150X_CAM] }, }; static struct msm_cam_expander_info cam_expander_info[] = { { cam_expander_i2c_info, MSM_8960_GSBI4_QUP_I2C_BUS_ID, }, }; #endif static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_GPIO, /*suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*active 1*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*active 2*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_1, /*active 3*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_5, /*active 4*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_6, /*active 5*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_2, /*active 6*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_3, /*active 7*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, }; static 
struct msm_gpiomux_config msm8960_cdp_flash_configs[] = { { .gpio = 3, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[1], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, }; static struct msm_gpiomux_config msm8960_cam_common_configs[] = { { .gpio = 2, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 3, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 4, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[1], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 5, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 76, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 107, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, }; static struct msm_gpiomux_config msm8960_cam_2d_configs[] = { { .gpio = 18, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 19, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 20, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 21, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, }; #define VFE_CAMIF_TIMER1_GPIO 2 #define VFE_CAMIF_TIMER2_GPIO 3 #define VFE_CAMIF_TIMER3_GPIO_INT 4 static struct msm_camera_sensor_strobe_flash_data strobe_flash_xenon = { .flash_trigger = VFE_CAMIF_TIMER2_GPIO, .flash_charge = VFE_CAMIF_TIMER1_GPIO, .flash_charge_done = VFE_CAMIF_TIMER3_GPIO_INT, .flash_recharge_duration = 50000, .irq = MSM_GPIO_TO_INT(VFE_CAMIF_TIMER3_GPIO_INT), }; #ifdef CONFIG_MSM_CAMERA_FLASH static struct msm_camera_sensor_flash_src msm_flash_src = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT, ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO, 
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO, ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A, }; #endif static struct msm_bus_vectors cam_init_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_preview_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 27648000, .ib = 110592000, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_video_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 154275840, .ib = 617103360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_snapshot_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 274423680, .ib = 1097694720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 
43200000, .ib = 69120000, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, }; static struct msm_bus_vectors cam_zsl_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 302071680, .ib = 1208286720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_MM_IMEM, .ab = 43200000, .ib = 69120000, }, }; static struct msm_bus_paths cam_bus_client_config[] = { { ARRAY_SIZE(cam_init_vectors), cam_init_vectors, }, { ARRAY_SIZE(cam_preview_vectors), cam_preview_vectors, }, { ARRAY_SIZE(cam_video_vectors), cam_video_vectors, }, { ARRAY_SIZE(cam_snapshot_vectors), cam_snapshot_vectors, }, { ARRAY_SIZE(cam_zsl_vectors), cam_zsl_vectors, }, }; static struct msm_bus_scale_pdata cam_bus_client_pdata = { cam_bus_client_config, ARRAY_SIZE(cam_bus_client_config), .name = "msm_camera", }; static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = { { .csid_core = 0, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, { .csid_core = 1, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, { .csid_core = 2, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, }; static struct camera_vreg_t msm_8960_back_cam_vreg[] = { {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2800000, 300000}, }; static struct camera_vreg_t msm_8960_front_cam_vreg[] = { {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, }; static struct gpio msm8960_common_cam_gpio[] = { {5, GPIOF_DIR_IN, "CAMIF_MCLK"}, {20, GPIOF_DIR_IN, "CAMIF_I2C_DATA"}, 
{21, GPIOF_DIR_IN, "CAMIF_I2C_CLK"}, }; static struct gpio msm8960_front_cam_gpio[] = { {76, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct gpio msm8960_back_cam_gpio[] = { {107, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl msm8960_front_cam_gpio_set_tbl[] = { {76, GPIOF_OUT_INIT_LOW, 1000}, {76, GPIOF_OUT_INIT_HIGH, 4000}, }; static struct msm_gpio_set_tbl msm8960_back_cam_gpio_set_tbl[] = { {107, GPIOF_OUT_INIT_LOW, 1000}, {107, GPIOF_OUT_INIT_HIGH, 4000}, }; static struct msm_camera_gpio_conf msm_8960_front_cam_gpio_conf = { .cam_gpiomux_conf_tbl = msm8960_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8960_cam_2d_configs), .cam_gpio_common_tbl = msm8960_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(msm8960_common_cam_gpio), .cam_gpio_req_tbl = msm8960_front_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(msm8960_front_cam_gpio), .cam_gpio_set_tbl = msm8960_front_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(msm8960_front_cam_gpio_set_tbl), }; static struct msm_camera_gpio_conf msm_8960_back_cam_gpio_conf = { .cam_gpiomux_conf_tbl = msm8960_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(msm8960_cam_2d_configs), .cam_gpio_common_tbl = msm8960_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(msm8960_common_cam_gpio), .cam_gpio_req_tbl = msm8960_back_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(msm8960_back_cam_gpio), .cam_gpio_set_tbl = msm8960_back_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(msm8960_back_cam_gpio_set_tbl), }; static struct i2c_board_info msm_act_main_cam_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x11), }; static struct msm_actuator_info msm_act_main_cam_0_info = { .board_info = &msm_act_main_cam_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_0, .bus_id = MSM_8960_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct i2c_board_info msm_act_main_cam1_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x18), }; static struct msm_actuator_info msm_act_main_cam_1_info = { 
.board_info = &msm_act_main_cam1_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_1, .bus_id = MSM_8960_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct msm_camera_sensor_flash_data flash_imx074 = { .flash_type = MSM_CAMERA_FLASH_LED, #ifdef CONFIG_MSM_CAMERA_FLASH .flash_src = &msm_flash_src #endif }; static struct msm_camera_csi_lane_params imx074_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = { .mount_angle = 90, .cam_vreg = msm_8960_back_cam_vreg, .num_vreg = ARRAY_SIZE(msm_8960_back_cam_vreg), .gpio_conf = &msm_8960_back_cam_gpio_conf, .csi_lane_params = &imx074_csi_lane_params, }; static struct i2c_board_info imx074_eeprom_i2c_info = { I2C_BOARD_INFO("imx074_eeprom", 0x34 << 1), }; static struct msm_eeprom_info imx074_eeprom_info = { .board_info = &imx074_eeprom_i2c_info, .bus_id = MSM_8960_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = { .sensor_name = "imx074", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx074, .strobe_flash_data = &strobe_flash_xenon, .sensor_platform_info = &sensor_board_info_imx074, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_0_info, .eeprom_info = &imx074_eeprom_info, }; static struct camera_vreg_t msm_8960_mt9m114_vreg[] = { {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2800000, 300000}, }; static struct msm_camera_sensor_flash_data flash_mt9m114 = { .flash_type = MSM_CAMERA_FLASH_NONE }; static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x1, }; static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = { .mount_angle = 90, .cam_vreg = msm_8960_mt9m114_vreg, .num_vreg = ARRAY_SIZE(msm_8960_mt9m114_vreg), .gpio_conf 
= &msm_8960_front_cam_gpio_conf, .csi_lane_params = &mt9m114_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = { .sensor_name = "mt9m114", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_mt9m114, .sensor_platform_info = &sensor_board_info_mt9m114, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = YUV_SENSOR, }; static struct msm_camera_sensor_flash_data flash_ov2720 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params ov2720_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = { .mount_angle = 0, .cam_vreg = msm_8960_front_cam_vreg, .num_vreg = ARRAY_SIZE(msm_8960_front_cam_vreg), .gpio_conf = &msm_8960_front_cam_gpio_conf, .csi_lane_params = &ov2720_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = { .sensor_name = "ov2720", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_ov2720, .sensor_platform_info = &sensor_board_info_ov2720, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct camera_vreg_t msm_8960_s5k3l1yx_vreg[] = { {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vaf", REG_LDO, 2800000, 2800000, 300000}, }; static struct msm_camera_sensor_flash_data flash_s5k3l1yx = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = { .mount_angle = 0, .cam_vreg = msm_8960_s5k3l1yx_vreg, .num_vreg = ARRAY_SIZE(msm_8960_s5k3l1yx_vreg), .gpio_conf = &msm_8960_back_cam_gpio_conf, .csi_lane_params = &s5k3l1yx_csi_lane_params, }; static struct msm_actuator_info msm_act_main_cam_2_info = { .board_info = &msm_act_main_cam_i2c_info, .cam_name 
= MSM_ACTUATOR_MAIN_CAM_2, .bus_id = MSM_8960_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = { .sensor_name = "s5k3l1yx", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_s5k3l1yx, .sensor_platform_info = &sensor_board_info_s5k3l1yx, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_2_info, }; static struct msm_camera_csi_lane_params imx091_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct camera_vreg_t msm_8960_imx091_vreg[] = { {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2800000, 300000}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vio", REG_VS, 0, 0, 0}, }; static struct msm_camera_sensor_flash_data flash_imx091 = { .flash_type = MSM_CAMERA_FLASH_LED, #ifdef CONFIG_MSM_CAMERA_FLASH .flash_src = &msm_flash_src #endif }; static struct msm_camera_sensor_platform_info sensor_board_info_imx091 = { .mount_angle = 0, .cam_vreg = msm_8960_imx091_vreg, .num_vreg = ARRAY_SIZE(msm_8960_imx091_vreg), .gpio_conf = &msm_8960_back_cam_gpio_conf, .csi_lane_params = &imx091_csi_lane_params, }; static struct i2c_board_info imx091_eeprom_i2c_info = { I2C_BOARD_INFO("imx091_eeprom", 0x21), }; static struct msm_eeprom_info imx091_eeprom_info = { .board_info = &imx091_eeprom_i2c_info, .bus_id = MSM_8960_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx091_data = { .sensor_name = "imx091", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx091, .sensor_platform_info = &sensor_board_info_imx091, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_1_info, .eeprom_info = &imx091_eeprom_info, }; static struct pm8xxx_mpp_config_data privacy_light_on_config = { .type = PM8XXX_MPP_TYPE_SINK, .level = PM8XXX_MPP_CS_OUT_5MA, .control = PM8XXX_MPP_CS_CTRL_MPP_LOW_EN, 
}; static struct pm8xxx_mpp_config_data privacy_light_off_config = { .type = PM8XXX_MPP_TYPE_SINK, .level = PM8XXX_MPP_CS_OUT_5MA, .control = PM8XXX_MPP_CS_CTRL_DISABLE, }; static int32_t msm_camera_8960_ext_power_ctrl(int enable) { int rc = 0; if (enable) { rc = pm8xxx_mpp_config(PM8921_MPP_PM_TO_SYS(12), &privacy_light_on_config); } else { rc = pm8xxx_mpp_config(PM8921_MPP_PM_TO_SYS(12), &privacy_light_off_config); } return rc; } static struct platform_device msm_camera_server = { .name = "msm_cam_server", .id = 0, }; void __init msm8960_init_cam(void) { msm_gpiomux_install(msm8960_cam_common_configs, ARRAY_SIZE(msm8960_cam_common_configs)); if (machine_is_msm8960_cdp()) { msm_gpiomux_install(msm8960_cdp_flash_configs, ARRAY_SIZE(msm8960_cdp_flash_configs)); msm_flash_src._fsrc.ext_driver_src.led_en = GPIO_CAM_GP_LED_EN1; msm_flash_src._fsrc.ext_driver_src.led_flash_en = GPIO_CAM_GP_LED_EN2; #if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \ defined(CONFIG_GPIO_SX150X_MODULE)) msm_flash_src._fsrc.ext_driver_src.expander_info = cam_expander_info; #endif } if (machine_is_msm8960_liquid()) { struct msm_camera_sensor_info *s_info; s_info = &msm_camera_sensor_imx074_data; s_info->sensor_platform_info->mount_angle = 180; s_info = &msm_camera_sensor_ov2720_data; s_info->sensor_platform_info->ext_power_ctrl = msm_camera_8960_ext_power_ctrl; } if (machine_is_msm8960_fluid()) { msm_camera_sensor_imx091_data.sensor_platform_info-> mount_angle = 270; } platform_device_register(&msm_camera_server); platform_device_register(&msm8960_device_csiphy0); platform_device_register(&msm8960_device_csiphy1); platform_device_register(&msm8960_device_csiphy2); platform_device_register(&msm8960_device_csid0); platform_device_register(&msm8960_device_csid1); platform_device_register(&msm8960_device_csid2); platform_device_register(&msm8960_device_ispif); platform_device_register(&msm8960_device_vfe); platform_device_register(&msm8960_device_vpe); } #ifdef CONFIG_I2C static struct 
i2c_board_info msm8960_camera_i2c_boardinfo[] = { { I2C_BOARD_INFO("imx074", 0x1A), .platform_data = &msm_camera_sensor_imx074_data, }, { I2C_BOARD_INFO("ov2720", 0x6C), .platform_data = &msm_camera_sensor_ov2720_data, }, { I2C_BOARD_INFO("mt9m114", 0x48), .platform_data = &msm_camera_sensor_mt9m114_data, }, { I2C_BOARD_INFO("s5k3l1yx", 0x20), .platform_data = &msm_camera_sensor_s5k3l1yx_data, }, #ifdef CONFIG_MSM_CAMERA_FLASH_SC628A { I2C_BOARD_INFO("sc628a", 0x6E), }, #endif { I2C_BOARD_INFO("imx091", 0x34), .platform_data = &msm_camera_sensor_imx091_data, }, }; struct msm_camera_board_info msm8960_camera_board_info = { .board_info = msm8960_camera_i2c_boardinfo, .num_i2c_board_info = ARRAY_SIZE(msm8960_camera_i2c_boardinfo), }; #endif #endif
gpl-2.0
ankon/openwrt
tools/flock/src/flock.c
538
8416
/* ----------------------------------------------------------------------- * * * Copyright 2003-2005 H. Peter Anvin - All Rights Reserved * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom * the Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall * be included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * ----------------------------------------------------------------------- */ #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <getopt.h> #include <signal.h> #include <ctype.h> #include <string.h> #include <paths.h> #include <sysexits.h> #include <sys/types.h> #include <sys/file.h> #include <sys/time.h> #include <sys/wait.h> #define PACKAGE_STRING "util-linux-ng 2.18" #define _(x) (x) static const struct option long_options[] = { { "shared", 0, NULL, 's' }, { "exclusive", 0, NULL, 'x' }, { "unlock", 0, NULL, 'u' }, { "nonblocking", 0, NULL, 'n' }, { "nb", 0, NULL, 'n' }, { "timeout", 1, NULL, 'w' }, { "wait", 1, NULL, 'w' }, { "close", 0, NULL, 'o' }, { "help", 0, NULL, 'h' }, { "version", 0, NULL, 'V' }, { 0, 0, 0, 0 } }; const char *program; static void usage(int ex) { fputs("flock (" PACKAGE_STRING ")\n", stderr); fprintf(stderr, _("Usage: %1$s [-sxun][-w #] fd#\n" " %1$s [-sxon][-w #] file [-c] command...\n" " %1$s [-sxon][-w #] directory [-c] command...\n" " -s --shared Get a shared lock\n" " -x --exclusive Get an exclusive lock\n" " -u --unlock Remove a lock\n" " -n --nonblock Fail rather than wait\n" " -w --timeout Wait for a limited amount of time\n" " -o --close Close file descriptor before running command\n" " -c --command Run a single command string through the shell\n" " -h --help Display this text\n" " -V --version Display version\n"), program); exit(ex); } static sig_atomic_t timeout_expired = 0; static void timeout_handler(int sig) { (void)sig; timeout_expired = 1; } static char * strtotimeval(const char *str, struct timeval *tv) { char *s; long fs; /* Fractional seconds */ int i; tv->tv_sec = strtol(str, &s, 10); fs = 0; if ( *s == '.' 
) { s++; for ( i = 0 ; i < 6 ; i++ ) { if ( !isdigit(*s) ) break; fs *= 10; fs += *s++ - '0'; } for ( ; i < 6; i++ ) fs *= 10; while ( isdigit(*s) ) s++; } tv->tv_usec = fs; return s; } int main(int argc, char *argv[]) { struct itimerval timeout, old_timer; int have_timeout = 0; int type = LOCK_EX; int block = 0; int fd = -1; int opt, ix; int do_close = 0; int err; int status; char *eon; char **cmd_argv = NULL, *sh_c_argv[4]; const char *filename = NULL; struct sigaction sa, old_sa; program = argv[0]; if ( argc < 2 ) usage(EX_USAGE); memset(&timeout, 0, sizeof timeout); optopt = 0; while ( (opt = getopt_long(argc, argv, "+sexnouw:hV?", long_options, &ix)) != EOF ) { switch(opt) { case 's': type = LOCK_SH; break; case 'e': case 'x': type = LOCK_EX; break; case 'u': type = LOCK_UN; break; case 'o': do_close = 1; break; case 'n': block = LOCK_NB; break; case 'w': have_timeout = 1; eon = strtotimeval(optarg, &timeout.it_value); if ( *eon ) usage(EX_USAGE); break; case 'V': printf("flock (%s)\n", PACKAGE_STRING); exit(0); default: /* optopt will be set if this was an unrecognized option, i.e. *not* 'h' or '?' */ usage(optopt ? 
EX_USAGE : 0); break; } } if ( argc > optind+1 ) { /* Run command */ if ( !strcmp(argv[optind+1], "-c") || !strcmp(argv[optind+1], "--command") ) { if ( argc != optind+3 ) { fprintf(stderr, _("%s: %s requires exactly one command argument\n"), program, argv[optind+1]); exit(EX_USAGE); } cmd_argv = sh_c_argv; cmd_argv[0] = getenv("SHELL"); if ( !cmd_argv[0] || !*cmd_argv[0] ) cmd_argv[0] = _PATH_BSHELL; cmd_argv[1] = "-c"; cmd_argv[2] = argv[optind+2]; cmd_argv[3] = 0; } else { cmd_argv = &argv[optind+1]; } filename = argv[optind]; fd = open(filename, O_RDONLY|O_NOCTTY|O_CREAT, 0666); /* Linux doesn't like O_CREAT on a directory, even though it should be a no-op */ if (fd < 0 && errno == EISDIR) fd = open(filename, O_RDONLY|O_NOCTTY); if ( fd < 0 ) { err = errno; fprintf(stderr, _("%s: cannot open lock file %s: %s\n"), program, argv[optind], strerror(err)); exit((err == ENOMEM||err == EMFILE||err == ENFILE) ? EX_OSERR : (err == EROFS||err == ENOSPC) ? EX_CANTCREAT : EX_NOINPUT); } } else if (optind < argc) { /* Use provided file descriptor */ fd = (int)strtol(argv[optind], &eon, 10); if ( *eon || !argv[optind] ) { fprintf(stderr, _("%s: bad number: %s\n"), program, argv[optind]); exit(EX_USAGE); } } else { /* Bad options */ fprintf(stderr, _("%s: requires file descriptor, file or directory\n"), program); exit(EX_USAGE); } if ( have_timeout ) { if ( timeout.it_value.tv_sec == 0 && timeout.it_value.tv_usec == 0 ) { /* -w 0 is equivalent to -n; this has to be special-cased because setting an itimer to zero means disabled! 
*/ have_timeout = 0; block = LOCK_NB; } else { memset(&sa, 0, sizeof sa); sa.sa_handler = timeout_handler; sa.sa_flags = SA_RESETHAND; sigaction(SIGALRM, &sa, &old_sa); setitimer(ITIMER_REAL, &timeout, &old_timer); } } while ( flock(fd, type|block) ) { switch( (err = errno) ) { case EWOULDBLOCK: /* -n option set and failed to lock */ exit(1); case EINTR: /* Signal received */ if ( timeout_expired ) exit(1); /* -w option set and failed to lock */ continue; /* otherwise try again */ default: /* Other errors */ if ( filename ) fprintf(stderr, "%s: %s: %s\n", program, filename, strerror(err)); else fprintf(stderr, "%s: %d: %s\n", program, fd, strerror(err)); exit((err == ENOLCK||err == ENOMEM) ? EX_OSERR : EX_DATAERR); } } if ( have_timeout ) { setitimer(ITIMER_REAL, &old_timer, NULL); /* Cancel itimer */ sigaction(SIGALRM, &old_sa, NULL); /* Cancel signal handler */ } status = 0; if ( cmd_argv ) { pid_t w, f; /* Clear any inherited settings */ signal(SIGCHLD, SIG_DFL); f = fork(); if ( f < 0 ) { err = errno; fprintf(stderr, _("%s: fork failed: %s\n"), program, strerror(err)); exit(EX_OSERR); } else if ( f == 0 ) { if ( do_close ) close(fd); err = errno; execvp(cmd_argv[0], cmd_argv); /* execvp() failed */ fprintf(stderr, "%s: %s: %s\n", program, cmd_argv[0], strerror(err)); _exit((err == ENOMEM) ? EX_OSERR: EX_UNAVAILABLE); } else { do { w = waitpid(f, &status, 0); if (w == -1 && errno != EINTR) break; } while ( w != f ); if (w == -1) { err = errno; status = EXIT_FAILURE; fprintf(stderr, "%s: waitpid failed: %s\n", program, strerror(err)); } else if ( WIFEXITED(status) ) status = WEXITSTATUS(status); else if ( WIFSIGNALED(status) ) status = WTERMSIG(status) + 128; else status = EX_OSERR; /* WTF? */ } } return status; }
gpl-2.0
spica234/HP-2X-V20Q
drivers/media/video/hexium_gemini.c
538
15271
/* hexium_gemini.c - v4l2 driver for Hexium Gemini frame grabber cards Visit http://www.mihu.de/linux/saa7146/ and follow the link to "hexium" for further details about this card. Copyright (C) 2003 Michael Hunold <michael@mihu.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define DEBUG_VARIABLE debug #include <media/saa7146_vv.h> static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "debug verbosity"); /* global variables */ static int hexium_num; #define HEXIUM_GEMINI 4 #define HEXIUM_GEMINI_DUAL 5 #define HEXIUM_INPUTS 9 static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = { { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0 }, }; #define HEXIUM_AUDIOS 0 
struct hexium_data { s8 adr; u8 byte; }; #define HEXIUM_CONTROLS 1 static struct v4l2_queryctrl hexium_controls[] = { { V4L2_CID_PRIVATE_BASE, V4L2_CTRL_TYPE_BOOLEAN, "B/W", 0, 1, 1, 0, 0 }, }; #define HEXIUM_GEMINI_V_1_0 1 #define HEXIUM_GEMINI_DUAL_V_1_0 2 struct hexium { int type; struct video_device *video_dev; struct i2c_adapter i2c_adapter; int cur_input; /* current input */ v4l2_std_id cur_std; /* current standard */ int cur_bw; /* current black/white status */ }; /* Samsung KS0127B decoder default registers */ static u8 hexium_ks0127b[0x100]={ /*00*/ 0x00,0x52,0x30,0x40,0x01,0x0C,0x2A,0x10, /*08*/ 0x00,0x00,0x00,0x60,0x00,0x00,0x0F,0x06, /*10*/ 0x00,0x00,0xE4,0xC0,0x00,0x00,0x00,0x00, /*18*/ 0x14,0x9B,0xFE,0xFF,0xFC,0xFF,0x03,0x22, /*20*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*28*/ 0x00,0x00,0x00,0x00,0x00,0x2C,0x9B,0x00, /*30*/ 0x00,0x00,0x10,0x80,0x80,0x10,0x80,0x80, /*38*/ 0x01,0x04,0x00,0x00,0x00,0x29,0xC0,0x00, /*40*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*48*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*50*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*58*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*60*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*68*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*70*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*78*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*80*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*88*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*90*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*98*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*A0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*A8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*B0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*B8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*C0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*C8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*D0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*D8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*E0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*E8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 
/*F0*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*F8*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }; static struct hexium_data hexium_pal[] = { { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF } }; static struct hexium_data hexium_pal_bw[] = { { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF } }; static struct hexium_data hexium_ntsc[] = { { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF } }; static struct hexium_data hexium_ntsc_bw[] = { { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF } }; static struct hexium_data hexium_secam[] = { { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF } }; static struct hexium_data hexium_input_select[] = { { 0x02, 0x60 }, { 0x02, 0x64 }, { 0x02, 0x61 }, { 0x02, 0x65 }, { 0x02, 0x62 }, { 0x02, 0x66 }, { 0x02, 0x68 }, { 0x02, 0x69 }, { 0x02, 0x6A }, }; /* fixme: h_offset = 0 for Hexium Gemini *Dual*, which are currently *not* supported*/ static struct saa7146_standard hexium_standards[] = { { .name = "PAL", .id = V4L2_STD_PAL, .v_offset = 28, .v_field = 288, .h_offset = 1, .h_pixels = 680, .v_max_out = 576, .h_max_out = 768, }, { .name = "NTSC", .id = V4L2_STD_NTSC, .v_offset = 28, .v_field = 240, .h_offset = 1, .h_pixels = 640, .v_max_out = 480, .h_max_out = 640, }, { .name = "SECAM", .id = V4L2_STD_SECAM, .v_offset = 28, .v_field = 288, .h_offset = 1, .h_pixels = 720, .v_max_out = 576, .h_max_out = 768, } }; /* bring hardware to a sane state. this has to be done, just in case someone wants to capture from this device before it has been properly initialized. the capture engine would badly fail, because no valid signal arrives on the saa7146, thus leading to timeouts and stuff. 
*/ static int hexium_init_done(struct saa7146_dev *dev) { struct hexium *hexium = (struct hexium *) dev->ext_priv; union i2c_smbus_data data; int i = 0; DEB_D(("hexium_init_done called.\n")); /* initialize the helper ics to useful values */ for (i = 0; i < sizeof(hexium_ks0127b); i++) { data.byte = hexium_ks0127b[i]; if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) { printk("hexium_gemini: hexium_init_done() failed for address 0x%02x\n", i); } } return 0; } static int hexium_set_input(struct hexium *hexium, int input) { union i2c_smbus_data data; DEB_D((".\n")); data.byte = hexium_input_select[input].byte; if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, hexium_input_select[input].adr, I2C_SMBUS_BYTE_DATA, &data)) { return -1; } return 0; } static int hexium_set_standard(struct hexium *hexium, struct hexium_data *vdec) { union i2c_smbus_data data; int i = 0; DEB_D((".\n")); while (vdec[i].adr != -1) { data.byte = vdec[i].byte; if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x6c, 0, I2C_SMBUS_WRITE, vdec[i].adr, I2C_SMBUS_BYTE_DATA, &data)) { printk("hexium_init_done: hexium_set_standard() failed for address 0x%02x\n", i); return -1; } i++; } return 0; } static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) { DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index)); if (i->index >= HEXIUM_INPUTS) return -EINVAL; memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input)); DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index)); return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct hexium *hexium = (struct hexium *) dev->ext_priv; *input = hexium->cur_input; DEB_D(("VIDIOC_G_INPUT: %d\n", *input)); return 0; } static int vidioc_s_input(struct file *file, void *fh, unsigned int input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct hexium *hexium = (struct 
hexium *) dev->ext_priv; DEB_EE(("VIDIOC_S_INPUT %d.\n", input)); if (input < 0 || input >= HEXIUM_INPUTS) return -EINVAL; hexium->cur_input = input; hexium_set_input(hexium, input); return 0; } /* the saa7146 provides some controls (brightness, contrast, saturation) which gets registered *after* this function. because of this we have to return with a value != 0 even if the function succeded.. */ static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; int i; for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) { if (hexium_controls[i].id == qc->id) { *qc = hexium_controls[i]; DEB_D(("VIDIOC_QUERYCTRL %d.\n", qc->id)); return 0; } } return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc); } static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct hexium *hexium = (struct hexium *) dev->ext_priv; int i; for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) { if (hexium_controls[i].id == vc->id) break; } if (i < 0) return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc); if (vc->id == V4L2_CID_PRIVATE_BASE) { vc->value = hexium->cur_bw; DEB_D(("VIDIOC_G_CTRL BW:%d.\n", vc->value)); return 0; } return -EINVAL; } static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct hexium *hexium = (struct hexium *) dev->ext_priv; int i = 0; for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) { if (hexium_controls[i].id == vc->id) break; } if (i < 0) return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc); if (vc->id == V4L2_CID_PRIVATE_BASE) hexium->cur_bw = vc->value; DEB_D(("VIDIOC_S_CTRL BW:%d.\n", hexium->cur_bw)); if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) { hexium_set_standard(hexium, hexium_pal); return 0; } if (0 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) { hexium_set_standard(hexium, 
hexium_ntsc); return 0; } if (0 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std) { hexium_set_standard(hexium, hexium_secam); return 0; } if (1 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) { hexium_set_standard(hexium, hexium_pal_bw); return 0; } if (1 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) { hexium_set_standard(hexium, hexium_ntsc_bw); return 0; } if (1 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std) /* fixme: is there no bw secam mode? */ return -EINVAL; return -EINVAL; } static struct saa7146_ext_vv vv_data; /* this function only gets called when the probing was successful */ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct hexium *hexium = (struct hexium *) dev->ext_priv; DEB_EE((".\n")); hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL); if (NULL == hexium) { printk("hexium_gemini: not enough kernel memory in hexium_attach().\n"); return -ENOMEM; } dev->ext_priv = hexium; /* enable i2c-port pins */ saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26)); hexium->i2c_adapter = (struct i2c_adapter) { .class = I2C_CLASS_TV_ANALOG, .name = "hexium gemini", }; saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if (i2c_add_adapter(&hexium->i2c_adapter) < 0) { DEB_S(("cannot register i2c-device. 
skipping.\n")); kfree(hexium); return -EFAULT; } /* set HWControl GPIO number 2 */ saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI); saa7146_write(dev, DD1_INIT, 0x07000700); saa7146_write(dev, DD1_STREAM_B, 0x00000000); saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); /* the rest */ hexium->cur_input = 0; hexium_init_done(dev); hexium_set_standard(hexium, hexium_pal); hexium->cur_std = V4L2_STD_PAL; hexium_set_input(hexium, 0); hexium->cur_input = 0; saa7146_vv_init(dev, &vv_data); vv_data.ops.vidioc_queryctrl = vidioc_queryctrl; vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl; vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl; vv_data.ops.vidioc_enum_input = vidioc_enum_input; vv_data.ops.vidioc_g_input = vidioc_g_input; vv_data.ops.vidioc_s_input = vidioc_s_input; if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) { printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n"); return -1; } printk("hexium_gemini: found 'hexium gemini' frame grabber-%d.\n", hexium_num); hexium_num++; return 0; } static int hexium_detach(struct saa7146_dev *dev) { struct hexium *hexium = (struct hexium *) dev->ext_priv; DEB_EE(("dev:%p\n", dev)); saa7146_unregister_device(&hexium->video_dev, dev); saa7146_vv_release(dev); hexium_num--; i2c_del_adapter(&hexium->i2c_adapter); kfree(hexium); return 0; } static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *std) { struct hexium *hexium = (struct hexium *) dev->ext_priv; if (V4L2_STD_PAL == std->id) { hexium_set_standard(hexium, hexium_pal); hexium->cur_std = V4L2_STD_PAL; return 0; } else if (V4L2_STD_NTSC == std->id) { hexium_set_standard(hexium, hexium_ntsc); hexium->cur_std = V4L2_STD_NTSC; return 0; } else if (V4L2_STD_SECAM == std->id) { hexium_set_standard(hexium, hexium_secam); hexium->cur_std = V4L2_STD_SECAM; return 0; } return -1; } static struct saa7146_extension hexium_extension; static struct saa7146_pci_extension_data hexium_gemini_4bnc = { 
.ext_priv = "Hexium Gemini (4 BNC)", .ext = &hexium_extension, }; static struct saa7146_pci_extension_data hexium_gemini_dual_4bnc = { .ext_priv = "Hexium Gemini Dual (4 BNC)", .ext = &hexium_extension, }; static struct pci_device_id pci_tbl[] = { { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, .subvendor = 0x17c8, .subdevice = 0x2401, .driver_data = (unsigned long) &hexium_gemini_4bnc, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7146, .subvendor = 0x17c8, .subdevice = 0x2402, .driver_data = (unsigned long) &hexium_gemini_dual_4bnc, }, { .vendor = 0, } }; MODULE_DEVICE_TABLE(pci, pci_tbl); static struct saa7146_ext_vv vv_data = { .inputs = HEXIUM_INPUTS, .capabilities = 0, .stds = &hexium_standards[0], .num_stds = sizeof(hexium_standards) / sizeof(struct saa7146_standard), .std_callback = &std_callback, }; static struct saa7146_extension hexium_extension = { .name = "hexium gemini", .flags = SAA7146_USE_I2C_IRQ, .pci_tbl = &pci_tbl[0], .module = THIS_MODULE, .attach = hexium_attach, .detach = hexium_detach, .irq_mask = 0, .irq_func = NULL, }; static int __init hexium_init_module(void) { if (0 != saa7146_register_extension(&hexium_extension)) { DEB_S(("failed to register extension.\n")); return -ENODEV; } return 0; } static void __exit hexium_cleanup_module(void) { saa7146_unregister_extension(&hexium_extension); } module_init(hexium_init_module); module_exit(hexium_cleanup_module); MODULE_DESCRIPTION("video4linux-2 driver for Hexium Gemini frame grabber cards"); MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_LICENSE("GPL");
gpl-2.0
simone201/neak-kernel-sgs2
drivers/mtd/maps/redwood.c
1562
4159
/* * drivers/mtd/maps/redwood.c * * FLASH map for the IBM Redwood 4/5/6 boards. * * Author: MontaVista Software, Inc. <source@mvista.com> * * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #if !defined (CONFIG_REDWOOD_6) #define WINDOW_ADDR 0xffc00000 #define WINDOW_SIZE 0x00400000 #define RW_PART0_OF 0 #define RW_PART0_SZ 0x10000 #define RW_PART1_OF RW_PART0_SZ #define RW_PART1_SZ 0x200000 - 0x10000 #define RW_PART2_OF 0x200000 #define RW_PART2_SZ 0x10000 #define RW_PART3_OF 0x210000 #define RW_PART3_SZ 0x200000 - (0x10000 + 0x20000) #define RW_PART4_OF 0x3e0000 #define RW_PART4_SZ 0x20000 static struct mtd_partition redwood_flash_partitions[] = { { .name = "Redwood OpenBIOS Vital Product Data", .offset = RW_PART0_OF, .size = RW_PART0_SZ, .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "Redwood kernel", .offset = RW_PART1_OF, .size = RW_PART1_SZ }, { .name = "Redwood OpenBIOS non-volatile storage", .offset = RW_PART2_OF, .size = RW_PART2_SZ, .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "Redwood filesystem", .offset = RW_PART3_OF, .size = RW_PART3_SZ }, { .name = "Redwood OpenBIOS", .offset = RW_PART4_OF, .size = RW_PART4_SZ, .mask_flags = MTD_WRITEABLE /* force read-only */ } }; #else /* CONFIG_REDWOOD_6 */ /* FIXME: the window is bigger - armin */ #define WINDOW_ADDR 0xff800000 #define WINDOW_SIZE 0x00800000 #define RW_PART0_OF 0 #define RW_PART0_SZ 0x400000 /* 4 MiB data */ #define RW_PART1_OF RW_PART0_OF + RW_PART0_SZ #define RW_PART1_SZ 0x10000 /* 64K VPD */ #define RW_PART2_OF RW_PART1_OF + RW_PART1_SZ #define RW_PART2_SZ 0x400000 - (0x10000 + 
0x20000) #define RW_PART3_OF RW_PART2_OF + RW_PART2_SZ #define RW_PART3_SZ 0x20000 static struct mtd_partition redwood_flash_partitions[] = { { .name = "Redwood filesystem", .offset = RW_PART0_OF, .size = RW_PART0_SZ }, { .name = "Redwood OpenBIOS Vital Product Data", .offset = RW_PART1_OF, .size = RW_PART1_SZ, .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "Redwood kernel", .offset = RW_PART2_OF, .size = RW_PART2_SZ }, { .name = "Redwood OpenBIOS", .offset = RW_PART3_OF, .size = RW_PART3_SZ, .mask_flags = MTD_WRITEABLE /* force read-only */ } }; #endif /* CONFIG_REDWOOD_6 */ struct map_info redwood_flash_map = { .name = "IBM Redwood", .size = WINDOW_SIZE, .bankwidth = 2, .phys = WINDOW_ADDR, }; #define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions) static struct mtd_info *redwood_mtd; static int __init init_redwood_flash(void) { int err; printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR); redwood_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!redwood_flash_map.virt) { printk("init_redwood_flash: failed to ioremap\n"); return -EIO; } simple_map_init(&redwood_flash_map); redwood_mtd = do_map_probe("cfi_probe",&redwood_flash_map); if (redwood_mtd) { redwood_mtd->owner = THIS_MODULE; err = add_mtd_partitions(redwood_mtd, redwood_flash_partitions, NUM_REDWOOD_FLASH_PARTITIONS); if (err) { printk("init_redwood_flash: add_mtd_partitions failed\n"); iounmap(redwood_flash_map.virt); } return err; } iounmap(redwood_flash_map.virt); return -ENXIO; } static void __exit cleanup_redwood_flash(void) { if (redwood_mtd) { del_mtd_partitions(redwood_mtd); /* moved iounmap after map_destroy - armin */ map_destroy(redwood_mtd); iounmap((void *)redwood_flash_map.virt); } } module_init(init_redwood_flash); module_exit(cleanup_redwood_flash); MODULE_LICENSE("GPL"); MODULE_AUTHOR("MontaVista Software <source@mvista.com>"); MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards");
gpl-2.0
nitroglycerine33/Note2_Tmo-Att-Vzw_Kernel
drivers/net/igb/igb_main.c
1562
193896
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/netdevice.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/net_tstamp.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/aer.h> #include <linux/prefetch.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif #include "igb.h" #define MAJ 3 #define MIN 0 #define BUILD 6 #define KFIX 2 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" __stringify(KFIX) char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, }; static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, { 
PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, igb_pci_tbl); void igb_reset(struct igb_adapter *); static int igb_setup_all_tx_resources(struct igb_adapter *); static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); static void igb_init_hw_timer(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_ring(int irq, void *); #ifdef CONFIG_IGB_DCA static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA 
*/ static bool igb_clean_tx_irq(struct igb_q_vector *); static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); static void igb_vlan_rx_register(struct net_device *, struct vlan_group *); static void igb_vlan_rx_add_vid(struct net_device *, u16); static void igb_vlan_rx_kill_vid(struct net_device *, u16); static void igb_restore_vlan(struct igb_adapter *); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static void igb_vmm_control(struct igb_adapter *); static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); #endif static void igb_shutdown(struct pci_dev *); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { .notifier_call = igb_notify_dca, .next = NULL, .priority = 0 }; #endif #ifdef CONFIG_NET_POLL_CONTROLLER /* for netdump / net console */ static void igb_netpoll(struct net_device *); #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs = 0; module_param(max_vfs, uint, 0); MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " 
"per physical function"); #endif /* CONFIG_PCI_IOV */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *, pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); static void igb_io_resume(struct pci_dev *); static struct pci_error_handlers igb_err_handler = { .error_detected = igb_io_error_detected, .slot_reset = igb_io_slot_reset, .resume = igb_io_resume, }; static struct pci_driver igb_driver = { .name = igb_driver_name, .id_table = igb_pci_tbl, .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM /* Power Management Hooks */ .suspend = igb_suspend, .resume = igb_resume, #endif .shutdown = igb_shutdown, .err_handler = &igb_err_handler }; MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); struct igb_reg_info { u32 ofs; char *name; }; static const struct igb_reg_info igb_reg_info_tbl[] = { /* General Registers */ {E1000_CTRL, "CTRL"}, {E1000_STATUS, "STATUS"}, {E1000_CTRL_EXT, "CTRL_EXT"}, /* Interrupt Registers */ {E1000_ICR, "ICR"}, /* RX Registers */ {E1000_RCTL, "RCTL"}, {E1000_RDLEN(0), "RDLEN"}, {E1000_RDH(0), "RDH"}, {E1000_RDT(0), "RDT"}, {E1000_RXDCTL(0), "RXDCTL"}, {E1000_RDBAL(0), "RDBAL"}, {E1000_RDBAH(0), "RDBAH"}, /* TX Registers */ {E1000_TCTL, "TCTL"}, {E1000_TDBAL(0), "TDBAL"}, {E1000_TDBAH(0), "TDBAH"}, {E1000_TDLEN(0), "TDLEN"}, {E1000_TDH(0), "TDH"}, {E1000_TDT(0), "TDT"}, {E1000_TXDCTL(0), "TXDCTL"}, {E1000_TDFH, "TDFH"}, {E1000_TDFT, "TDFT"}, {E1000_TDFHS, "TDFHS"}, {E1000_TDFPC, "TDFPC"}, /* List Terminator */ {} }; /* * igb_regdump - register printout routine */ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) { int n = 0; char rname[16]; u32 regs[8]; switch (reginfo->ofs) { case E1000_RDLEN(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDLEN(n)); break; case E1000_RDH(0): for (n = 0; n < 4; n++) regs[n] = 
rd32(E1000_RDH(n)); break; case E1000_RDT(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDT(n)); break; case E1000_RXDCTL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RXDCTL(n)); break; case E1000_RDBAL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAL(n)); break; case E1000_RDBAH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAH(n)); break; case E1000_TDBAL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAL(n)); break; case E1000_TDBAH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDBAH(n)); break; case E1000_TDLEN(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDLEN(n)); break; case E1000_TDH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDH(n)); break; case E1000_TDT(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDT(n)); break; case E1000_TXDCTL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TXDCTL(n)); break; default: printk(KERN_INFO "%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); printk(KERN_INFO "%-15s ", rname); for (n = 0; n < 4; n++) printk(KERN_CONT "%08x ", regs[n]); printk(KERN_CONT "\n"); } /* * igb_dump - Print registers, tx-rings and rx-rings */ static void igb_dump(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct igb_reg_info *reginfo; int n = 0; struct igb_ring *tx_ring; union e1000_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; struct igb_buffer *buffer_info; struct igb_ring *rx_ring; union e1000_adv_rx_desc *rx_desc; u32 staterr; int i = 0; if (!netif_msg_hw(adapter)) return; /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); printk(KERN_INFO "Device Name state " "trans_start last_rx\n"); printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); printk(KERN_INFO " Register Name Value\n"); for 
(reginfo = (struct igb_reg_info *)igb_reg_info_tbl; reginfo->name; reginfo++) { igb_regdump(hw, reginfo); } /* Print TX Ring Summary */ if (!netdev || !netif_running(netdev)) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" " leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp); } /* Print TX Rings */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); /* Transmit Descriptor Formats * * Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | * +--------------------------------------------------------------+ * 63 46 45 40 39 38 36 35 32 31 24 15 0 */ for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "T [desc] [address 63:0 ] " "[PlPOCIStDDM Ln] [bi->dma ] " "leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" " %04X %3X %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp, buffer_info->skb); if (i == 
tx_ring->next_to_use && i == tx_ring->next_to_clean) printk(KERN_CONT " NTC/U\n"); else if (i == tx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == tx_ring->next_to_clean) printk(KERN_CONT " NTC\n"); else printk(KERN_CONT "\n"); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt(buffer_info->dma), buffer_info->length, true); } } /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; printk(KERN_INFO " %5d %5X %5X\n", n, rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ if (!netif_msg_rx_status(adapter)) goto exit; dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); /* Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| * +----------------------------------------------+------+ * 8 | Header Buffer Address [63:1] | DD | * +-----------------------------------------------------+ * * * Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 17 16 4 3 0 * +------------------------------------------------------+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | * | Checksum Ident | | | | Type | Type | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "R [desc] [ PktBuf A0] " "[ HeadBuf DD] [bi->dma ] [bi->skb] " "<-- Adv Rx Read format\n"); printk(KERN_INFO 
"RWB[desc] [PcsmIpSHl PtRs] " "[vl er S cks ln] ---------------- [bi->skb] " "<-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ printk(KERN_INFO "RWB[0x%03X] %016llX " "%016llX ---------------- %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), buffer_info->skb); } else { printk(KERN_INFO "R [0x%03X] %016llX " "%016llX %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->skb); if (netif_msg_pktdata(adapter)) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt(buffer_info->dma), rx_ring->rx_buffer_len, true); if (rx_ring->rx_buffer_len < IGB_RXBUFFER_1024) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt( buffer_info->page_dma + buffer_info->page_offset), PAGE_SIZE/2, true); } } if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) printk(KERN_CONT " NTC\n"); else printk(KERN_CONT "\n"); } } exit: return; } /** * igb_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t igb_read_clock(const struct cyclecounter *tc) { struct igb_adapter *adapter = container_of(tc, struct igb_adapter, cycles); struct e1000_hw *hw = &adapter->hw; u64 stamp = 0; int shift = 0; /* * The timestamp latches on lowest register read. For the 82580 * the lowest register is SYSTIMR instead of SYSTIML. However we never * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. 
 */
	if (hw->mac.type == e1000_82580) {
		/* drop SYSTIMR's low byte; only SYSTIML/SYSTIMH carry
		 * meaningful bits here (TIMINCA was never adjusted) */
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);

	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

/* 82576 queue-index to register-index mapping: even queues map to the
 * low half (i >> 1), odd queues to the high half (+8 via (i & 1) << 3). */
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
		/* fallthrough: remaining (non-VF-offset) queues use the
		 * linear mapping below */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

/* Free all tx/rx ring structures and reset the queue counts to zero. */
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		/* NULL the slot so a later free/alloc pass sees it empty */
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring.
 */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* release any rings allocated before the failure */
	igb_free_queues(adapter);
	return -ENOMEM;
}

/* sentinel meaning "no queue mapped to this vector" */
#define IGB_N0_QUEUE -1

/* Program the hardware so the rings owned by @q_vector raise interrupt
 * @msix_vector; the register layout is MAC-type specific. */
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		 * Each queue has a single entry in the table to which we
		 * write a vector number along with a "valid" bit. Sadly,
		 * the layout of the table is somewhat counterintuitive.
*/ if (rx_queue > IGB_N0_QUEUE) { index = (rx_queue & 0x7); ivar = array_rd32(E1000_IVAR0, index); if (rx_queue < 8) { /* vector goes into low byte of register */ ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } else { /* vector goes into third byte of register */ ivar = ivar & 0xFF00FFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 16; } array_wr32(E1000_IVAR0, index, ivar); } if (tx_queue > IGB_N0_QUEUE) { index = (tx_queue & 0x7); ivar = array_rd32(E1000_IVAR0, index); if (tx_queue < 8) { /* vector goes into second byte of register */ ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } else { /* vector goes into high byte of register */ ivar = ivar & 0x00FFFFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 24; } array_wr32(E1000_IVAR0, index, ivar); } q_vector->eims_value = 1 << msix_vector; break; case e1000_82580: case e1000_i350: /* 82580 uses the same table-based approach as 82576 but has fewer entries as a result we carry over for queues greater than 4. 
*/ if (rx_queue > IGB_N0_QUEUE) { index = (rx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (rx_queue & 0x1) { /* vector goes into third byte of register */ ivar = ivar & 0xFF00FFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 16; } else { /* vector goes into low byte of register */ ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } array_wr32(E1000_IVAR0, index, ivar); } if (tx_queue > IGB_N0_QUEUE) { index = (tx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (tx_queue & 0x1) { /* vector goes into high byte of register */ ivar = ivar & 0x00FFFFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 24; } else { /* vector goes into second byte of register */ ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } array_wr32(E1000_IVAR0, index, ivar); } q_vector->eims_value = 1 << msix_vector; break; default: BUG(); break; } /* add q_vector eims value to global eims_enable_mask */ adapter->eims_enable_mask |= q_vector->eims_value; /* configure q_vector to set itr on first interrupt */ q_vector->set_itr = 1; } /** * igb_configure_msix - Configure MSI-X hardware * * igb_configure_msix sets up the hardware to properly * generate MSI-X interrupts. **/ static void igb_configure_msix(struct igb_adapter *adapter) { u32 tmp; int i, vector = 0; struct e1000_hw *hw = &adapter->hw; adapter->eims_enable_mask = 0; /* set vector for other causes, i.e. link changes */ switch (hw->mac.type) { case e1000_82575: tmp = rd32(E1000_CTRL_EXT); /* enable MSI-X PBA support*/ tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. */ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; wr32(E1000_CTRL_EXT, tmp); /* enable msix_other interrupt */ array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); adapter->eims_other = E1000_EIMS_OTHER; break; case e1000_82576: case e1000_82580: case e1000_i350: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. 
*/ wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_PBA | E1000_GPIE_EIAME | E1000_GPIE_NSICR); /* enable msix_other interrupt */ adapter->eims_other = 1 << vector; tmp = (vector++ | E1000_IVAR_VALID) << 8; wr32(E1000_IVAR_MISC, tmp); break; default: /* do nothing, since nothing else supports MSI-X */ break; } /* switch (hw->mac.type) */ adapter->eims_enable_mask |= adapter->eims_other; for (i = 0; i < adapter->num_q_vectors; i++) igb_assign_vector(adapter->q_vector[i], vector++); wrfl(); } /** * igb_request_msix - Initialize MSI-X interrupts * * igb_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. **/ static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; int i, err = 0, vector = 0; err = request_irq(adapter->msix_entries[vector].vector, igb_msix_other, 0, netdev->name, adapter); if (err) goto out; vector++; for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); if (q_vector->rx_ring && q_vector->tx_ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, q_vector->rx_ring->queue_index); else if (q_vector->tx_ring) sprintf(q_vector->name, "%s-tx-%u", netdev->name, q_vector->tx_ring->queue_index); else if (q_vector->rx_ring) sprintf(q_vector->name, "%s-rx-%u", netdev->name, q_vector->rx_ring->queue_index); else sprintf(q_vector->name, "%s-unused", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, igb_msix_ring, 0, q_vector->name, q_vector); if (err) goto out; vector++; } igb_configure_msix(adapter); return 0; out: return err; } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) { if (adapter->msix_entries) { pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } else if (adapter->flags & IGB_FLAG_HAS_MSI) { pci_disable_msi(adapter->pdev); } } /** * 
igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		/* clear the table slot before freeing; slot may already be
		 * NULL if a previous allocation pass failed part-way */
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues.
 */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	/* fall back to a single-queue, single-vector MSI configuration */
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	/* unwind the vectors allocated so far */
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

/* Attach rx ring @ring_idx to q_vector @v_idx and take the rx itr setting. */
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	/* settings 1-3 look like mode selectors rather than literal itr
	 * values, so substitute the default rate — TODO confirm against
	 * the itr-setting semantics elsewhere in the driver */
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/* Attach tx ring @ring_idx to q_vector @v_idx and take the tx itr setting. */
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	/* need at least one vector per queue of each type */
	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		/* enough vectors for every ring to own one exclusively */
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		/* fewer vectors than rings: pair tx ring i with rx ring i
		 * on a shared vector, then map any leftover tx rings */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;

	/* goto-based unwind: release in reverse order of acquisition */
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
**/ static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err = 0; if (adapter->msix_entries) { err = igb_request_msix(adapter); if (!err) goto request_done; /* fall back to MSI */ igb_clear_interrupt_scheme(adapter); if (!pci_enable_msi(adapter->pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); adapter->num_tx_queues = 1; adapter->num_rx_queues = 1; adapter->num_q_vectors = 1; err = igb_alloc_q_vectors(adapter); if (err) { dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); goto request_done; } err = igb_alloc_queues(adapter); if (err) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); igb_free_q_vectors(adapter); goto request_done; } igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); } else { igb_assign_vector(adapter->q_vector[0], 0); } if (adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, netdev->name, adapter); if (!err) goto request_done; /* fall back to legacy interrupts */ igb_reset_interrupt_capability(adapter); adapter->flags &= ~IGB_FLAG_HAS_MSI; } err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, netdev->name, adapter); if (err) dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", err); request_done: return err; } static void igb_free_irq(struct igb_adapter *adapter) { if (adapter->msix_entries) { int vector = 0, i; free_irq(adapter->msix_entries[vector++].vector, adapter); for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; free_irq(adapter->msix_entries[vector++].vector, q_vector); } } else { free_irq(adapter->pdev->irq, adapter); } } /** * igb_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void igb_irq_disable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; /* * we 
need to be careful when disabling interrupts. The VFs are also * mapped into these registers and so clearing the bits can cause * issues on the VF drivers so we only need to clear what we set */ if (adapter->msix_entries) { u32 regval = rd32(E1000_EIAM); wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); wr32(E1000_EIMC, adapter->eims_enable_mask); regval = rd32(E1000_EIAC); wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); } wr32(E1000_IAM, 0); wr32(E1000_IMC, ~0); wrfl(); if (adapter->msix_entries) { int i; for (i = 0; i < adapter->num_q_vectors; i++) synchronize_irq(adapter->msix_entries[i].vector); } else { synchronize_irq(adapter->pdev->irq); } } /** * igb_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ static void igb_irq_enable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) { u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; u32 regval = rd32(E1000_EIAC); wr32(E1000_EIAC, regval | adapter->eims_enable_mask); regval = rd32(E1000_EIAM); wr32(E1000_EIAM, regval | adapter->eims_enable_mask); wr32(E1000_EIMS, adapter->eims_enable_mask); if (adapter->vfs_allocated_count) { wr32(E1000_MBVFIMR, 0xFF); ims |= E1000_IMS_VMMB; } if (adapter->hw.mac.type == e1000_82580) ims |= E1000_IMS_DRSTA; wr32(E1000_IMS, ims); } else { wr32(E1000_IMS, IMS_ENABLE_MASK | E1000_IMS_DRSTA); wr32(E1000_IAM, IMS_ENABLE_MASK | E1000_IMS_DRSTA); } } static void igb_update_mng_vlan(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ igb_vfta_set(hw, vid, true); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; } if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && (vid != old_vid) && !vlan_group_get_device(adapter->vlgrp, old_vid)) { /* remove VID from filter table */ igb_vfta_set(hw, 
old_vid, false); } } /** * igb_release_hw_control - release control of the h/w to f/w * @adapter: address of board private structure * * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. * **/ static void igb_release_hw_control(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; /* Let firmware take over control of h/w */ ctrl_ext = rd32(E1000_CTRL_EXT); wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } /** * igb_get_hw_control - get control of the h/w from f/w * @adapter: address of board private structure * * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. * **/ static void igb_get_hw_control(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; /* Let firmware know the driver has taken over */ ctrl_ext = rd32(E1000_CTRL_EXT); wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } /** * igb_configure - configure the hardware for RX and TX * @adapter: private board structure **/ static void igb_configure(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i; igb_get_hw_control(adapter); igb_set_rx_mode(netdev); igb_restore_vlan(adapter); igb_setup_tctl(adapter); igb_setup_mrqc(adapter); igb_setup_rctl(adapter); igb_configure_tx(adapter); igb_configure_rx(adapter); igb_rx_fifo_flush_82575(&adapter->hw); /* call igb_desc_unused which always leaves * at least 1 descriptor unused to make sure * next_to_use != next_to_clean */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); } } /** * igb_power_up_link - Power up the phy/serdes link * @adapter: address of board private structure **/ void igb_power_up_link(struct igb_adapter *adapter) { if (adapter->hw.phy.media_type == e1000_media_type_copper) 
igb_power_up_phy_copper(&adapter->hw); else igb_power_up_serdes_link_82575(&adapter->hw); } /** * igb_power_down_link - Power down the phy/serdes link * @adapter: address of board private structure */ static void igb_power_down_link(struct igb_adapter *adapter) { if (adapter->hw.phy.media_type == e1000_media_type_copper) igb_power_down_phy_copper_82575(&adapter->hw); else igb_shutdown_serdes_link_82575(&adapter->hw); } /** * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure **/ int igb_up(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; int i; /* hardware has been reset, we need to reload some things */ igb_configure(adapter); clear_bit(__IGB_DOWN, &adapter->state); for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; napi_enable(&q_vector->napi); } if (adapter->msix_entries) igb_configure_msix(adapter); else igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. */ rd32(E1000_ICR); igb_irq_enable(adapter); /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { u32 reg_data = rd32(E1000_CTRL_EXT); reg_data |= E1000_CTRL_EXT_PFRSTD; wr32(E1000_CTRL_EXT, reg_data); } netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. 
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); return 0; } void igb_down(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 tctl, rctl; int i; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGB_DOWN, &adapter->state); /* disable receives in the hardware */ rctl = rd32(E1000_RCTL); wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ tctl = rd32(E1000_TCTL); tctl &= ~E1000_TCTL_EN; wr32(E1000_TCTL, tctl); /* flush both disables and wait for them to finish */ wrfl(); msleep(10); for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; napi_disable(&q_vector->napi); } igb_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); netif_carrier_off(netdev); /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); igb_update_stats(adapter, &adapter->stats64); spin_unlock(&adapter->stats64_lock); adapter->link_speed = 0; adapter->link_duplex = 0; if (!pci_channel_offline(adapter->pdev)) igb_reset(adapter); igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); #ifdef CONFIG_IGB_DCA /* since we reset the hardware DCA settings were cleared */ igb_setup_dca(adapter); #endif } void igb_reinit_locked(struct igb_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) msleep(1); igb_down(adapter); igb_up(adapter); clear_bit(__IGB_RESETTING, &adapter->state); } void igb_reset(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; u32 pba = 0, tx_space, min_tx_space, min_rx_space; u16 hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is 
required. */ switch (mac->type) { case e1000_i350: case e1000_82580: pba = rd32(E1000_RXPBS); pba = igb_rxpbs_adjust_82580(pba); break; case e1000_82576: pba = rd32(E1000_RXPBS); pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82575: default: pba = E1000_PBA_34K; break; } if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && (mac->type < e1000_82576)) { /* adjust PBA for jumbo frames */ wr32(E1000_PBA, pba); /* To maintain wire speed transmits, the Tx FIFO should be * large enough to accommodate two full transmit packets, * rounded up to the next 1KB and expressed in KB. Likewise, * the Rx FIFO should be large enough to accommodate at least * one full receive packet and is similarly rounded up and * expressed in KB. */ pba = rd32(E1000_PBA); /* upper 16 bits has Tx packet buffer allocation size in KB */ tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; /* the tx fifo also stores 16 bytes of information about the tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ min_rx_space = adapter->max_frame_size; min_rx_space = ALIGN(min_rx_space, 1024); min_rx_space >>= 10; /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO * allocation, take space away from current Rx allocation */ if (tx_space < min_tx_space && ((min_tx_space - tx_space) < pba)) { pba = pba - (min_tx_space - tx_space); /* if short on rx space, rx wins and must trump tx * adjustment */ if (pba < min_rx_space) pba = min_rx_space; } wr32(E1000_PBA, pba); } /* flow control settings */ /* The high water mark must be low enough to fit one full frame * (or the size used for early receive) above it in the Rx FIFO. 
* Set it to the lower of: * - 90% of the Rx FIFO size, or * - the full Rx FIFO size minus one full frame */ hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; fc->pause_time = 0xFFFF; fc->send_xon = 1; fc->current_mode = fc->requested_mode; /* disable receive for all VFs and wait one second */ if (adapter->vfs_allocated_count) { int i; for (i = 0 ; i < adapter->vfs_allocated_count; i++) adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; /* ping all the active vfs to let them know we are going down */ igb_ping_all_vfs(adapter); /* disable transmits and receives */ wr32(E1000_VFRE, 0); wr32(E1000_VFTE, 0); } /* Allow time for pending master requests to run */ hw->mac.ops.reset_hw(hw); wr32(E1000_WUC, 0); if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); if (hw->mac.type > e1000_82580) { if (adapter->flags & IGB_FLAG_DMAC) { u32 reg; /* * DMA Coalescing high water mark needs to be higher * than * the * Rx threshold. The Rx threshold is * currently * pba - 6, so we * should use a high water * mark of pba * - 4. */ hwm = (pba - 4) << 10; reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); /* watchdog timer= +-1000 usec in 32usec intervals */ reg |= (1000 >> 5); wr32(E1000_DMACR, reg); /* no lower threshold to disable coalescing(smart fifb) * -UTRESH=0*/ wr32(E1000_DMCRTRH, 0); /* set hwm to PBA - 2 * max frame size */ wr32(E1000_FCRTC, hwm); /* * This sets the time to wait before requesting tran- * sition to * low power state to number of usecs needed * to receive 1 512 * byte frame at gigabit line rate */ reg = rd32(E1000_DMCTLX); reg |= IGB_DMCTLX_DCFLUSH_DIS; /* Delay 255 usec before entering Lx state. 
*/ reg |= 0xFF; wr32(E1000_DMCTLX, reg); /* free space in Tx packet buffer to wake from DMAC */ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); /* make low power state decision controlled by DMAC */ reg = rd32(E1000_PCIEMISC); reg |= E1000_PCIEMISC_LX_DECISION; wr32(E1000_PCIEMISC, reg); } /* end if IGB_FLAG_DMAC set */ } if (hw->mac.type == e1000_82580) { u32 reg = rd32(E1000_PCIEMISC); wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); } if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); igb_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); igb_get_phy_info(hw); } static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, .ndo_start_xmit = igb_xmit_frame_adv, .ndo_get_stats64 = igb_get_stats64, .ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_multicast_list = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, .ndo_change_mtu = igb_change_mtu, .ndo_do_ioctl = igb_ioctl, .ndo_tx_timeout = igb_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_register = igb_vlan_rx_register, .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, .ndo_set_vf_mac = igb_ndo_set_vf_mac, .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, .ndo_get_vf_config = igb_ndo_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = igb_netpoll, #endif }; /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in igb_pci_tbl * * Returns 0 on success, negative on failure * * igb_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ static int __devinit igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct igb_adapter *adapter; struct e1000_hw *hw; u16 eeprom_data = 0; s32 ret_val; static int global_quad_port_a; /* global quad port a indication */ const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; unsigned long mmio_start, mmio_len; int err, pci_using_dac; u16 eeprom_apme_mask = IGB_EEPROM_APME; u8 part_str[E1000_PBANUM_LENGTH]; /* Catch broken hardware that put the wrong VF device ID in * the PCIe SR-IOV capability. */ if (pdev->is_virtfn) { WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev), pdev->vendor, pdev->device); return -EINVAL; } err = pci_enable_device_mem(pdev); if (err) return err; pci_using_dac = 0; err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!err) pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA " "configuration, aborting\n"); goto err_dma; } } } err = pci_request_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM), igb_driver_name); if (err) goto err_pci_reg; pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); pci_save_state(pdev); err = -ENOMEM; netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_ABS_MAX_TX_QUEUES); if (!netdev) goto err_alloc_etherdev; SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); err = -EIO; hw->hw_addr = ioremap(mmio_start, mmio_len); if (!hw->hw_addr) goto err_ioremap; netdev->netdev_ops = &igb_netdev_ops; igb_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 
* HZ; strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; /* Copy the default MAC, PHY and NVM function pointers */ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); /* Initialize skew-specific constants */ err = ei->get_invariants(hw); if (err) goto err_sw_init; /* setup the private structure */ err = igb_sw_init(adapter); if (err) goto err_sw_init; igb_get_bus_info_pcie(hw); hw->phy.autoneg_wait_to_complete = false; /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; hw->phy.disable_polarity_correction = false; hw->phy.ms_type = e1000_ms_hw_default; } if (igb_check_reset_block(hw)) dev_info(&pdev->dev, "PHY reset is blocked due to SOL/IDER session.\n"); netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; netdev->features |= NETIF_F_GRO; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; } if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CSUM; adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); /* before reading the NVM, reset the controller to put the device in a * known good starting state */ hw->mac.ops.reset_hw(hw); /* make sure the NVM is good */ if 
(hw->nvm.ops.validate(hw) < 0) { dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } /* copy the MAC address out of the NVM */ if (hw->mac.ops.read_mac_addr(hw)) dev_err(&pdev->dev, "NVM Read Error\n"); memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; } setup_timer(&adapter->watchdog_timer, igb_watchdog, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, igb_update_phy_info, (unsigned long) adapter); INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); /* Initialize link properties that are user-changeable */ adapter->fc_autoneg = true; hw->mac.autoneg = true; hw->phy.autoneg_advertised = 0x2f; hw->fc.requested_mode = e1000_fc_default; hw->fc.current_mode = e1000_fc_default; igb_validate_mdi_setting(hw); /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, * enable the ACPI Magic Packet filter */ if (hw->bus.func == 0) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); else if (hw->mac.type >= e1000_82580) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &eeprom_data); else if (hw->bus.func == 1) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); if (eeprom_data & eeprom_apme_mask) adapter->eeprom_wol |= E1000_WUFC_MAG; /* now that we have the eeprom settings, apply the special cases where * the eeprom may be wrong or the board simply won't support wake on * lan on a particular port */ switch (pdev->device) { case E1000_DEV_ID_82575GB_QUAD_COPPER: adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (rd32(E1000_STATUS) & 
E1000_STATUS_FUNC_1) adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->eeprom_wol = 0; else adapter->flags |= IGB_FLAG_QUAD_PORT_A; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* reset the hardware with the new settings */ igb_reset(adapter); /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; dev_info(&pdev->dev, "DCA enabled\n"); igb_setup_dca(adapter); } #endif /* do hw tstamp init after resetting */ igb_init_hw_timer(adapter); dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", netdev->name, ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : "unknown"), ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : "unknown"), netdev->dev_addr); ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); if (ret_val) strcpy(part_str, "Unknown"); dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); dev_info(&pdev->dev, "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", adapter->msix_entries ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); switch (hw->mac.type) { case e1000_i350: igb_set_eee_i350(hw); break; default: break; } return 0; err_register: igb_release_hw_control(adapter); err_eeprom: if (!igb_check_reset_block(hw)) igb_reset_phy(hw); if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: igb_clear_interrupt_scheme(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * igb_remove - Device Removal Routine * @pdev: PCI device information struct * * igb_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void __devexit igb_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. */ set_bit(__IGB_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); #ifdef CONFIG_IGB_DCA if (adapter->flags & IGB_FLAG_DCA_ENABLED) { dev_info(&pdev->dev, "DCA disabled\n"); dca_remove_requester(&pdev->dev); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); } #endif /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ igb_release_hw_control(adapter); unregister_netdev(netdev); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV /* reclaim resources allocated to VFs */ if (adapter->vf_data) { /* disable iov and allow time for transactions to clear */ pci_disable_sriov(pdev); msleep(500); kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); msleep(100); dev_info(&pdev->dev, "IOV Disabled\n"); } #endif iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } /** * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space * @adapter: board private structure to initialize * * This function initializes the vf specific data storage and then attempts to * allocate the VFs. The reason for ordering it this way is because it is much * mor expensive time wise to disable SR-IOV than it is to allocate and free * the memory for the VFs. 
**/ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) { #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; if (adapter->vfs_allocated_count) { adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), GFP_KERNEL); /* if allocation failed then we do not support SR-IOV */ if (!adapter->vf_data) { adapter->vfs_allocated_count = 0; dev_err(&pdev->dev, "Unable to allocate memory for VF " "Data Storage\n"); } } if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) { kfree(adapter->vf_data); adapter->vf_data = NULL; #endif /* CONFIG_PCI_IOV */ adapter->vfs_allocated_count = 0; #ifdef CONFIG_PCI_IOV } else { unsigned char mac_addr[ETH_ALEN]; int i; dev_info(&pdev->dev, "%d vfs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) { random_ether_addr(mac_addr); igb_set_vf_mac(adapter, i, mac_addr); } /* DMA Coalescing is not supported in IOV mode. */ if (adapter->flags & IGB_FLAG_DMAC) adapter->flags &= ~IGB_FLAG_DMAC; } #endif /* CONFIG_PCI_IOV */ } /** * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp * @adapter: board private structure to initialize * * igb_init_hw_timer initializes the function pointer and values for the hw * timer found in hardware. **/ static void igb_init_hw_timer(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; switch (hw->mac.type) { case e1000_i350: case e1000_82580: memset(&adapter->cycles, 0, sizeof(adapter->cycles)); adapter->cycles.read = igb_read_clock; adapter->cycles.mask = CLOCKSOURCE_MASK(64); adapter->cycles.mult = 1; /* * The 82580 timesync updates the system timer every 8ns by 8ns * and the value cannot be shifted. Instead we need to shift * the registers to generate a 64bit timer value. As a result * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by * 24 in order to generate a larger value for synchronization. 
*/ adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; /* disable system timer temporarily by setting bit 31 */ wr32(E1000_TSAUXC, 0x80000000); wrfl(); /* Set registers so that rollover occurs soon to test this. */ wr32(E1000_SYSTIMR, 0x00000000); wr32(E1000_SYSTIML, 0x80000000); wr32(E1000_SYSTIMH, 0x000000FF); wrfl(); /* enable system timer by clearing bit 31 */ wr32(E1000_TSAUXC, 0x0); wrfl(); timecounter_init(&adapter->clock, &adapter->cycles, ktime_to_ns(ktime_get_real())); /* * Synchronize our NIC clock against system wall clock. NIC * time stamp reading requires ~3us per sample, each sample * was pretty stable even under load => only require 10 * samples for each offset comparison. */ memset(&adapter->compare, 0, sizeof(adapter->compare)); adapter->compare.source = &adapter->clock; adapter->compare.target = ktime_get_real; adapter->compare.num_samples = 10; timecompare_update(&adapter->compare, 0); break; case e1000_82576: /* * Initialize hardware timer: we keep it running just in case * that some program needs it later on. */ memset(&adapter->cycles, 0, sizeof(adapter->cycles)); adapter->cycles.read = igb_read_clock; adapter->cycles.mask = CLOCKSOURCE_MASK(64); adapter->cycles.mult = 1; /** * Scale the NIC clock cycle by a large factor so that * relatively small clock corrections can be added or * subtracted at each clock tick. The drawbacks of a large * factor are a) that the clock register overflows more quickly * (not such a big deal) and b) that the increment per tick has * to fit into 24 bits. As a result we need to use a shift of * 19 so we can fit a value of 16 into the TIMINCA register. */ adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; wr32(E1000_TIMINCA, (1 << E1000_TIMINCA_16NS_SHIFT) | (16 << IGB_82576_TSYNC_SHIFT)); /* Set registers so that rollover occurs soon to test this. 
*/ wr32(E1000_SYSTIML, 0x00000000); wr32(E1000_SYSTIMH, 0xFF800000); wrfl(); timecounter_init(&adapter->clock, &adapter->cycles, ktime_to_ns(ktime_get_real())); /* * Synchronize our NIC clock against system wall clock. NIC * time stamp reading requires ~3us per sample, each sample * was pretty stable even under load => only require 10 * samples for each offset comparison. */ memset(&adapter->compare, 0, sizeof(adapter->compare)); adapter->compare.source = &adapter->clock; adapter->compare.target = ktime_get_real; adapter->compare.num_samples = 10; timecompare_update(&adapter->compare, 0); break; case e1000_82575: /* 82575 does not support timesync */ default: break; } } /** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize * * igb_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int __devinit igb_sw_init(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); adapter->tx_ring_count = IGB_DEFAULT_TXD; adapter->rx_ring_count = IGB_DEFAULT_RXD; adapter->rx_itr_setting = IGB_DEFAULT_ITR; adapter->tx_itr_setting = IGB_DEFAULT_ITR; adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { case e1000_82576: case e1000_i350: if (max_vfs > 7) { dev_warn(&pdev->dev, "Maximum of 7 VFs per PF, using max\n"); adapter->vfs_allocated_count = 7; } else adapter->vfs_allocated_count = max_vfs; break; default: break; } #endif /* CONFIG_PCI_IOV */ adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); /* i350 cannot do RSS and SR-IOV at the same time */ if (hw->mac.type == 
e1000_i350 && adapter->vfs_allocated_count) adapter->rss_queues = 1; /* * if rss_queues > 4 or vfs are going to be allocated with rss_queues * then we should combine the queues into a queue pair in order to * conserve interrupts due to limited supply */ if ((adapter->rss_queues > 4) || ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } igb_probe_vfs(adapter); /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); if (hw->mac.type == e1000_i350) adapter->flags &= ~IGB_FLAG_DMAC; set_bit(__IGB_DOWN, &adapter->state); return 0; } /** * igb_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ static int igb_open(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int err; int i; /* disallow open during test */ if (test_bit(__IGB_TESTING, &adapter->state)) return -EBUSY; netif_carrier_off(netdev); /* allocate transmit descriptors */ err = igb_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = igb_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; igb_power_up_link(adapter); /* before we allocate an interrupt, we must be ready to handle it. 
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. */ igb_configure(adapter); err = igb_request_irq(adapter); if (err) goto err_req_irq; /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; napi_enable(&q_vector->napi); } /* Clear any pending interrupts. */ rd32(E1000_ICR); igb_irq_enable(adapter); /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { u32 reg_data = rd32(E1000_CTRL_EXT); reg_data |= E1000_CTRL_EXT_PFRSTD; wr32(E1000_CTRL_EXT, reg_data); } netif_tx_start_all_queues(netdev); /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); return 0; err_req_irq: igb_release_hw_control(adapter); igb_power_down_link(adapter); igb_free_all_rx_resources(adapter); err_setup_rx: igb_free_all_tx_resources(adapter); err_setup_tx: igb_reset(adapter); return err; } /** * igb_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the driver's control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
**/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* close must not race with an in-progress reset */
	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array and the DMA-coherent
 * descriptor ring for one Tx queue.
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	/* vfree(NULL) is a no-op, so this covers both failure paths */
	vfree(tx_ring->buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				 (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			/* unwind the rings allocated so far */
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	/* map every possible Tx queue index onto an allocated ring */
	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
		int r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter
*adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue before reprogramming it */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	/* program ring length and DMA base address */
	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	/* cache head/tail register addresses and zero both indices */
	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* prefetch/host/write-back thresholds, then re-enable the queue */
	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
**/ static void igb_configure_tx(struct igb_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) igb_configure_tx_ring(adapter, adapter->tx_ring[i]); } /** * igb_setup_rx_resources - allocate Rx resources (Descriptors) * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int igb_setup_rx_resources(struct igb_ring *rx_ring) { struct device *dev = rx_ring->dev; int size, desc_len; size = sizeof(struct igb_buffer) * rx_ring->count; rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; desc_len = sizeof(union e1000_adv_rx_desc); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * desc_len; rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; dev_err(dev, "Unable to allocate memory for the receive descriptor" " ring\n"); return -ENOMEM; } /** * igb_setup_all_rx_resources - wrapper to allocate Rx resources * (Descriptors) for all queues * @adapter: board private structure * * Return 0 on success, negative on failure **/ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { err = igb_setup_rx_resources(adapter->rx_ring[i]); if (err) { dev_err(&pdev->dev, "Allocation for Rx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_rx_resources(adapter->rx_ring[i]); break; } } return err; } /** * igb_setup_mrqc - configure the multiple receive queue control registers * @adapter: Board private structure **/ static void igb_setup_mrqc(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; u32 j, num_rx_queues, shift = 0, shift2 = 0; union e1000_reta { u32 dword; u8 bytes[4]; } reta; static const u8 
rsshash[40] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; /* Fill out hash function seeds */ for (j = 0; j < 10; j++) { u32 rsskey = rsshash[(j * 4)]; rsskey |= rsshash[(j * 4) + 1] << 8; rsskey |= rsshash[(j * 4) + 2] << 16; rsskey |= rsshash[(j * 4) + 3] << 24; array_wr32(E1000_RSSRK(0), j, rsskey); } num_rx_queues = adapter->rss_queues; if (adapter->vfs_allocated_count) { /* 82575 and 82576 supports 2 RSS queues for VMDq */ switch (hw->mac.type) { case e1000_i350: case e1000_82580: num_rx_queues = 1; shift = 0; break; case e1000_82576: shift = 3; num_rx_queues = 2; break; case e1000_82575: shift = 2; shift2 = 6; default: break; } } else { if (hw->mac.type == e1000_82575) shift = 6; } for (j = 0; j < (32 * 4); j++) { reta.bytes[j & 3] = (j % num_rx_queues) << shift; if (shift2) reta.bytes[j & 3] |= num_rx_queues << shift2; if ((j & 3) == 3) wr32(E1000_RETA(j >> 2), reta.dword); } /* * Disable raw packet checksumming so that RSS hash is placed in * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum * offloads as they are enabled by default */ rxcsum = rd32(E1000_RXCSUM); rxcsum |= E1000_RXCSUM_PCSD; if (adapter->hw.mac.type >= e1000_82576) /* Enable Receive Checksum Offload for SCTP */ rxcsum |= E1000_RXCSUM_CRCOFL; /* Don't need to set TUOFL or IPOFL, they default to 1 */ wr32(E1000_RXCSUM, rxcsum); /* If VMDq is enabled then we set the appropriate mode for that, else * we default to RSS so that an RSS hash is calculated per packet even * if we are only using one queue */ if (adapter->vfs_allocated_count) { if (hw->mac.type > e1000_82575) { /* Set the default pool for the PF's first queue */ u32 vtctl = rd32(E1000_VT_CTL); vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | E1000_VT_CTL_DISABLE_DEF_POOL); vtctl |= adapter->vfs_allocated_count << E1000_VT_CTL_DEFAULT_POOL_SHIFT; wr32(E1000_VT_CTL, vtctl); } if (adapter->rss_queues > 1) mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; else mrqc = E1000_MRQC_ENABLE_VMDQ; } else { mrqc = E1000_MRQC_ENABLE_RSS_4Q; } igb_vmm_control(adapter); /* * Generate RSS hash based on TCP port numbers and/or * IPv4/v6 src and dst addresses since UDP cannot be * hashed reliably due to IP fragmentation */ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; wr32(E1000_MRQC, mrqc); } /** * igb_setup_rctl - configure the receive control registers * @adapter: Board private structure **/ void igb_setup_rctl(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl; rctl = rd32(E1000_RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* * enable stripping of CRC. It's unlikely this will break BMC * redirection as it did with e1000. Newer features require * that the HW strips the CRC. 
*/
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

/*
 * igb_set_vf_rlpml - program the long-packet maximum length (VMOLR.RLPML)
 * for one pool/VF.  Currently always returns 0.
 */
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;

	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	/* leave headroom for a VLAN tag when a vlan group is configured */
	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

/*
 * igb_set_vmolr - program the VM offload register for one pool/VF
 * @aupe: when true, the pool accepts untagged packets
 */
static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;       /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
**/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue before reprogramming it */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		/* small buffer: use header-split, packet data goes into
		 * (up to) half a page */
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
			 E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	/* enable receive descriptor fetching; keep only the reserved
	 * upper bits, then program the prefetch/host/write-back
	 * thresholds */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
**/ static void igb_configure_rx(struct igb_adapter *adapter) { int i; /* set UTA to appropriate mode */ igb_set_uta(adapter); /* set the correct pool for the PF default MAC address in entry 0 */ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, adapter->vfs_allocated_count); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) igb_configure_rx_ring(adapter, adapter->rx_ring[i]); } /** * igb_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void igb_free_tx_resources(struct igb_ring *tx_ring) { igb_clean_tx_ring(tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; /* if not set, then don't free */ if (!tx_ring->desc) return; dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * igb_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ static void igb_free_all_tx_resources(struct igb_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) igb_free_tx_resources(adapter->tx_ring[i]); } void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, struct igb_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) dma_unmap_page(tx_ring->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); else dma_unmap_single(tx_ring->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; buffer_info->length = 0; buffer_info->next_to_watch = 0; buffer_info->mapped_as_page = false; } /** * igb_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void igb_clean_tx_ring(struct igb_ring *tx_ring) { struct igb_buffer *buffer_info; 
    unsigned long size;
    unsigned int i;

    if (!tx_ring->buffer_info)
        return;
    /* Free all the Tx ring sk_buffs */

    for (i = 0; i < tx_ring->count; i++) {
        buffer_info = &tx_ring->buffer_info[i];
        igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
    }

    size = sizeof(struct igb_buffer) * tx_ring->count;
    memset(tx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring (descriptor memory itself stays
     * allocated; only igb_free_tx_resources releases it) */
    memset(tx_ring->desc, 0, tx_ring->size);

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++)
        igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
    igb_clean_rx_ring(rx_ring);

    vfree(rx_ring->buffer_info);
    rx_ring->buffer_info = NULL;

    /* if not set, then don't free */
    if (!rx_ring->desc)
        return;

    dma_free_coherent(rx_ring->dev, rx_ring->size,
                      rx_ring->desc, rx_ring->dma);

    rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
    struct igb_buffer *buffer_info;
    unsigned long size;
    unsigned int i;

    if (!rx_ring->buffer_info)
        return;

    /* Free all the Rx ring sk_buffs */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = &rx_ring->buffer_info[i];
        if (buffer_info->dma) {
            dma_unmap_single(rx_ring->dev,
                             buffer_info->dma,
                             rx_ring->rx_buffer_len,
                             DMA_FROM_DEVICE);
            buffer_info->dma = 0;
        }

        if
(buffer_info->skb) {
            dev_kfree_skb(buffer_info->skb);
            buffer_info->skb = NULL;
        }
        /* release the half-page used for header-split/page reuse */
        if (buffer_info->page_dma) {
            dma_unmap_page(rx_ring->dev,
                           buffer_info->page_dma,
                           PAGE_SIZE / 2,
                           DMA_FROM_DEVICE);
            buffer_info->page_dma = 0;
        }
        if (buffer_info->page) {
            put_page(buffer_info->page);
            buffer_info->page = NULL;
            buffer_info->page_offset = 0;
        }
    }

    size = sizeof(struct igb_buffer) * rx_ring->count;
    memset(rx_ring->buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure (struct sockaddr)
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    /* keep the software copies (netdev and MAC layer) in sync before
     * programming the hardware RAR entry */
    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
    memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

    /* set the correct pool for the new PF MAC address in entry 0 */
    igb_rar_set_qsel(adapter, hw->mac.addr, 0,
                     adapter->vfs_allocated_count);

    return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
* Returns: -ENOMEM on failure * 0 on no addresses written * X on writing X addresses to MTA **/ static int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u8 *mta_list; int i; if (netdev_mc_empty(netdev)) { /* nothing to program, so clear mc list */ igb_update_mc_addr_list(hw, NULL, 0); igb_restore_vf_multicasts(adapter); return 0; } mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); if (!mta_list) return -ENOMEM; /* The shared function expects a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); igb_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); return netdev_mc_count(netdev); } /** * igb_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure * * Writes unicast address list to the RAR table. * Returns: -ENOMEM on failure/insufficient address space * 0 on no addresses written * X on writing X addresses to the RAR table **/ static int igb_write_uc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); int count = 0; /* return ENOMEM indicating insufficient memory for addresses */ if (netdev_uc_count(netdev) > rar_entries) return -ENOMEM; if (!netdev_uc_empty(netdev) && rar_entries) { struct netdev_hw_addr *ha; netdev_for_each_uc_addr(ha, netdev) { if (!rar_entries) break; igb_rar_set_qsel(adapter, ha->addr, rar_entries--, vfn); count++; } } /* write the addresses in reverse order to avoid write combining */ for (; rar_entries > 0 ; rar_entries--) { wr32(E1000_RAH(rar_entries), 0); wr32(E1000_RAL(rar_entries), 0); } wrfl(); return count; } /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * 
@netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    unsigned int vfn = adapter->vfs_allocated_count;
    u32 rctl, vmolr = 0;
    int count;

    /* Check for Promiscuous and All Multicast modes */
    rctl = rd32(E1000_RCTL);

    /* clear the effected bits */
    rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

    if (netdev->flags & IFF_PROMISC) {
        rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
    } else {
        if (netdev->flags & IFF_ALLMULTI) {
            rctl |= E1000_RCTL_MPE;
            vmolr |= E1000_VMOLR_MPME;
        } else {
            /*
             * Write addresses to the MTA, if the attempt fails
             * then we should just turn on promiscuous mode so
             * that we can at least receive multicast traffic
             */
            count = igb_write_mc_addr_list(netdev);
            if (count < 0) {
                rctl |= E1000_RCTL_MPE;
                vmolr |= E1000_VMOLR_MPME;
            } else if (count) {
                vmolr |= E1000_VMOLR_ROMPE;
            }
        }
        /*
         * Write addresses to available RAR registers, if there is not
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
        count = igb_write_uc_addr_list(netdev);
        if (count < 0) {
            rctl |= E1000_RCTL_UPE;
            vmolr |= E1000_VMOLR_ROPE;
        }
        /* VLAN filtering stays on whenever we are not fully promiscuous */
        rctl |= E1000_RCTL_VFE;
    }
    wr32(E1000_RCTL, rctl);

    /*
     * In order to support SR-IOV and eventually VMDq it is necessary to set
     * the VMOLR to enable the appropriate modes.
Without this workaround
     * we will have issues with VLAN tag stripping not being done for frames
     * that are only arriving because we are the default pool
     */
    if (hw->mac.type < e1000_82576)
        return;

    /* merge the new flags into the pool's VMOLR, clearing only the bits
     * this function owns */
    vmolr |= rd32(E1000_VMOLR(vfn)) &
             ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
    wr32(E1000_VMOLR(vfn), vmolr);
    igb_restore_vf_multicasts(adapter);
}

/* Latch any Wrong VM Behavior (spoof) indications from the hardware into
 * adapter->wvbr; only 82576 and i350 expose the WVBR register. */
static void igb_check_wvbr(struct igb_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 wvbr = 0;

    switch (hw->mac.type) {
    case e1000_82576:
    case e1000_i350:
        if (!(wvbr = rd32(E1000_WVBR)))
            return;
        break;
    default:
        break;
    }

    adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

/* Report and clear any latched per-VF spoof events; each VF has two bits
 * (staggered by IGB_STAGGERED_QUEUE_OFFSET) in adapter->wvbr. */
static void igb_spoof_check(struct igb_adapter *adapter)
{
    int j;

    if (!adapter->wvbr)
        return;

    for(j = 0; j < adapter->vfs_allocated_count; j++) {
        if (adapter->wvbr & (1 << j) ||
            adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
            dev_warn(&adapter->pdev->dev,
                "Spoof event(s) detected on VF %d\n", j);
            adapter->wvbr &=
                ~((1 << j) |
                  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
        }
    }
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
    struct igb_adapter *adapter = (struct igb_adapter *) data;
    igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    bool link_active = false;
    s32 ret_val = 0;

    /* get_link_status is set on LSC (link status) interrupt or
     * rx sequence error interrupt.
get_link_status will stay
     * false until the e1000_check_for_link establishes link
     * for copper adapters ONLY
     */
    switch (hw->phy.media_type) {
    case e1000_media_type_copper:
        if (hw->mac.get_link_status) {
            ret_val = hw->mac.ops.check_for_link(hw);
            link_active = !hw->mac.get_link_status;
        } else {
            link_active = true;
        }
        break;
    case e1000_media_type_internal_serdes:
        ret_val = hw->mac.ops.check_for_link(hw);
        link_active = hw->mac.serdes_has_link;
        break;
    default:
    case e1000_media_type_unknown:
        break;
    }

    return link_active;
}

/* Returns true when the given thermal-sensor status bit (event) is set.
 * Only meaningful on i350 copper parts not in SGMII mode. */
static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
    bool ret = false;
    u32 ctrl_ext, thstat;

    /* check for thermal sensor event on i350, copper only */
    if (hw->mac.type == e1000_i350) {
        thstat = rd32(E1000_THSTAT);
        ctrl_ext = rd32(E1000_CTRL_EXT);

        if ((hw->phy.media_type == e1000_media_type_copper) &&
            !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
            ret = !!(thstat & event);
        }
    }

    return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
    struct igb_adapter *adapter = (struct igb_adapter *)data;
    /* Do the rest outside of interrupt context */
    schedule_work(&adapter->watchdog_task);
}

/* Periodic watchdog work: detects link transitions, logs link state,
 * updates statistics, and checks the Tx rings for stalled work. */
static void igb_watchdog_task(struct work_struct *work)
{
    struct igb_adapter *adapter = container_of(work,
                                               struct igb_adapter,
                                               watchdog_task);
    struct e1000_hw *hw = &adapter->hw;
    struct net_device *netdev = adapter->netdev;
    u32 link;
    int i;

    link = igb_has_link(adapter);
    if (link) {
        if (!netif_carrier_ok(netdev)) {
            u32 ctrl;
            hw->mac.ops.get_speed_and_duplex(hw,
                                             &adapter->link_speed,
                                             &adapter->link_duplex);

            ctrl = rd32(E1000_CTRL);
            /* Links status message must follow this format */
            printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
                 "Flow Control: %s\n",
                   netdev->name,
                   adapter->link_speed,
                   adapter->link_duplex == FULL_DUPLEX ?
                     "Full Duplex" : "Half Duplex",
                   ((ctrl & E1000_CTRL_TFCE) &&
                    (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
                   ((ctrl & E1000_CTRL_RFCE) ?
"RX" :
                   ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));

            /* check for thermal sensor event */
            if (igb_thermal_sensor_event(hw,
                                         E1000_THSTAT_LINK_THROTTLE)) {
                printk(KERN_INFO "igb: %s The network adapter "
                       "link speed was downshifted "
                       "because it overheated.\n",
                       netdev->name);
            }

            /* adjust timeout factor according to speed/duplex */
            adapter->tx_timeout_factor = 1;
            switch (adapter->link_speed) {
            case SPEED_10:
                adapter->tx_timeout_factor = 14;
                break;
            case SPEED_100:
                /* maybe add some timeout factor ? */
                break;
            }

            netif_carrier_on(netdev);

            igb_ping_all_vfs(adapter);
            igb_check_vf_rate_limit(adapter);

            /* link state has changed, schedule phy info update */
            if (!test_bit(__IGB_DOWN, &adapter->state))
                mod_timer(&adapter->phy_info_timer,
                          round_jiffies(jiffies + 2 * HZ));
        }
    } else {
        if (netif_carrier_ok(netdev)) {
            adapter->link_speed = 0;
            adapter->link_duplex = 0;

            /* check for thermal sensor event */
            if (igb_thermal_sensor_event(hw,
                                         E1000_THSTAT_PWR_DOWN)) {
                printk(KERN_ERR "igb: %s The network adapter "
                       "was stopped because it "
                       "overheated.\n",
                       netdev->name);
            }

            /* Links status message must follow this format */
            printk(KERN_INFO "igb: %s NIC Link is Down\n",
                   netdev->name);
            netif_carrier_off(netdev);

            igb_ping_all_vfs(adapter);

            /* link state has changed, schedule phy info update */
            if (!test_bit(__IGB_DOWN, &adapter->state))
                mod_timer(&adapter->phy_info_timer,
                          round_jiffies(jiffies + 2 * HZ));
        }
    }

    spin_lock(&adapter->stats64_lock);
    igb_update_stats(adapter, &adapter->stats64);
    spin_unlock(&adapter->stats64_lock);

    for (i = 0; i < adapter->num_tx_queues; i++) {
        struct igb_ring *tx_ring = adapter->tx_ring[i];
        if (!netif_carrier_ok(netdev)) {
            /* We've lost link, so the controller stops DMA,
             * but we've got queued Tx work that's never going
             * to get done, so reset controller to flush Tx.
             * (Do the reset outside of interrupt context).
*/
            if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
                adapter->tx_timeout_count++;
                schedule_work(&adapter->reset_task);
                /* return immediately since reset is imminent */
                return;
            }
        }

        /* Force detection of hung controller every watchdog period */
        tx_ring->detect_tx_hung = true;
    }

    /* Cause software interrupt to ensure rx ring is cleaned */
    if (adapter->msix_entries) {
        u32 eics = 0;
        for (i = 0; i < adapter->num_q_vectors; i++) {
            struct igb_q_vector *q_vector = adapter->q_vector[i];
            eics |= q_vector->eims_value;
        }
        wr32(E1000_EICS, eics);
    } else {
        wr32(E1000_ICS, E1000_ICS_RXDMT0);
    }

    igb_spoof_check(adapter);

    /* Reset the timer */
    if (!test_bit(__IGB_DOWN, &adapter->state))
        mod_timer(&adapter->watchdog_timer,
                  round_jiffies(jiffies + 2 * HZ));
}

/* Interrupt-throttle latency classes used by the dynamic ITR code below. */
enum latency_range {
    lowest_latency = 0,
    low_latency = 1,
    bulk_latency = 2,
    latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based on strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
    int new_val = q_vector->itr_val;
    int avg_wire_size = 0;
    struct igb_adapter *adapter = q_vector->adapter;
    struct igb_ring *ring;
    unsigned int packets;

    /* For non-gigabit speeds, just fix the interrupt rate at 4000
     * ints/sec - ITR timer value of 120 ticks.
*/
    if (adapter->link_speed != SPEED_1000) {
        new_val = 976;
        goto set_itr_val;
    }

    ring = q_vector->rx_ring;
    if (ring) {
        packets = ACCESS_ONCE(ring->total_packets);

        if (packets)
            avg_wire_size = ring->total_bytes / packets;
    }

    ring = q_vector->tx_ring;
    if (ring) {
        packets = ACCESS_ONCE(ring->total_packets);

        if (packets)
            /* take the larger of the rx/tx average frame sizes */
            avg_wire_size = max_t(u32, avg_wire_size,
                                  ring->total_bytes / packets);
    }

    /* if avg_wire_size isn't set no work was done */
    if (!avg_wire_size)
        goto clear_counts;

    /* Add 24 bytes to size to account for CRC, preamble, and gap */
    avg_wire_size += 24;

    /* Don't starve jumbo frames */
    avg_wire_size = min(avg_wire_size, 3000);

    /* Give a little boost to mid-size frames */
    if ((avg_wire_size > 300) && (avg_wire_size < 1200))
        new_val = avg_wire_size / 3;
    else
        new_val = avg_wire_size / 2;

    /* when in itr mode 3 do not exceed 20K ints/sec */
    if (adapter->rx_itr_setting == 3 && new_val < 196)
        new_val = 196;

set_itr_val:
    if (new_val != q_vector->itr_val) {
        q_vector->itr_val = new_val;
        q_vector->set_itr = 1;
    }
clear_counts:
    if (q_vector->rx_ring) {
        q_vector->rx_ring->total_bytes = 0;
        q_vector->rx_ring->total_packets = 0;
    }
    if (q_vector->tx_ring) {
        q_vector->tx_ring->total_bytes = 0;
        q_vector->tx_ring->total_packets = 0;
    }
}

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
* @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                                   int packets, int bytes)
{
    unsigned int retval = itr_setting;

    /* no traffic in this interval: keep the current setting */
    if (packets == 0)
        goto update_itr_done;

    switch (itr_setting) {
    case lowest_latency:
        /* handle TSO and jumbo frames */
        if (bytes/packets > 8000)
            retval = bulk_latency;
        else if ((packets < 5) && (bytes > 512))
            retval = low_latency;
        break;
    case low_latency:  /* 50 usec aka 20000 ints/s */
        if (bytes > 10000) {
            /* this if handles the TSO accounting */
            if (bytes/packets > 8000) {
                retval = bulk_latency;
            } else if ((packets < 10) ||
                       ((bytes/packets) > 1200)) {
                retval = bulk_latency;
            } else if ((packets > 35)) {
                retval = lowest_latency;
            }
        } else if (bytes/packets > 2000) {
            retval = bulk_latency;
        } else if (packets <= 2 && bytes < 512) {
            retval = lowest_latency;
        }
        break;
    case bulk_latency: /* 250 usec aka 4000 ints/s */
        if (bytes > 25000) {
            if (packets > 35)
                retval = low_latency;
        } else if (bytes < 1500) {
            retval = low_latency;
        }
        break;
    }

update_itr_done:
    return retval;
}

/* Recompute the single-queue ITR from the rx/tx traffic of q_vector[0]
 * and stage it for write on the next interrupt. */
static void igb_set_itr(struct igb_adapter *adapter)
{
    struct igb_q_vector *q_vector = adapter->q_vector[0];
    u16 current_itr;
    u32 new_itr = q_vector->itr_val;

    /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
    if (adapter->link_speed != SPEED_1000) {
        current_itr = 0;
        new_itr = 4000;
        goto set_itr_now;
    }

    adapter->rx_itr = igb_update_itr(adapter,
                                     adapter->rx_itr,
                                     q_vector->rx_ring->total_packets,
                                     q_vector->rx_ring->total_bytes);

    adapter->tx_itr = igb_update_itr(adapter,
                                     adapter->tx_itr,
                                     q_vector->tx_ring->total_packets,
                                     q_vector->tx_ring->total_bytes);

    current_itr = max(adapter->rx_itr, adapter->tx_itr);

    /* conservative mode (itr 3) eliminates the lowest_latency setting */
    if (adapter->rx_itr_setting == 3 &&
        current_itr == lowest_latency)
        current_itr =
low_latency;

    switch (current_itr) {
    /* counts and packets in update_itr are dependent on these numbers */
    case lowest_latency:
        new_itr = 56;  /* aka 70,000 ints/sec */
        break;
    case low_latency:
        new_itr = 196; /* aka 20,000 ints/sec */
        break;
    case bulk_latency:
        new_itr = 980; /* aka 4,000 ints/sec */
        break;
    default:
        break;
    }

set_itr_now:
    /* reset the measurement window */
    q_vector->rx_ring->total_bytes = 0;
    q_vector->rx_ring->total_packets = 0;
    q_vector->tx_ring->total_bytes = 0;
    q_vector->tx_ring->total_packets = 0;

    if (new_itr != q_vector->itr_val) {
        /* this attempts to bias the interrupt rate towards Bulk
         * by adding intermediate steps when interrupt rate is
         * increasing */
        new_itr = new_itr > q_vector->itr_val ?
                  max((new_itr * q_vector->itr_val) /
                      (new_itr + (q_vector->itr_val >> 2)),
                      new_itr) :
                  new_itr;
        /* Don't write the value here; it resets the adapter's
         * internal timer, and causes us to delay far longer than
         * we should between interrupts.  Instead, we write the ITR
         * value at the beginning of the next interrupt so the timing
         * ends up being correct.
*/
        q_vector->itr_val = new_itr;
        q_vector->set_itr = 1;
    }
}

/* Per-packet transmit flags carried through the xmit path. */
#define IGB_TX_FLAGS_CSUM       0x00000001
#define IGB_TX_FLAGS_VLAN       0x00000002
#define IGB_TX_FLAGS_TSO        0x00000004
#define IGB_TX_FLAGS_IPV4       0x00000008
#define IGB_TX_FLAGS_TSTAMP     0x00000010
#define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16

/* Build the TSO context descriptor for skb.  Returns true when a context
 * descriptor was written, 0 when not, or a negative errno on failure;
 * *hdr_len accumulates the total header length for the data descriptors. */
static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags,
                              u8 *hdr_len)
{
    struct e1000_adv_tx_context_desc *context_desc;
    unsigned int i;
    int err;
    struct igb_buffer *buffer_info;
    u32 info = 0, tu_cmd = 0;
    u32 mss_l4len_idx;
    u8 l4len;

    if (skb_header_cloned(skb)) {
        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
        if (err)
            return err;
    }

    l4len = tcp_hdrlen(skb);
    *hdr_len += l4len;

    if (skb->protocol == htons(ETH_P_IP)) {
        /* zero the IP length/checksum and seed the pseudo-header
         * checksum so hardware can finish it per segment */
        struct iphdr *iph = ip_hdr(skb);
        iph->tot_len = 0;
        iph->check = 0;
        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                 iph->daddr, 0,
                                                 IPPROTO_TCP,
                                                 0);
    } else if (skb_is_gso_v6(skb)) {
        ipv6_hdr(skb)->payload_len = 0;
        tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                               &ipv6_hdr(skb)->daddr,
                                               0, IPPROTO_TCP, 0);
    }

    i = tx_ring->next_to_use;

    buffer_info = &tx_ring->buffer_info[i];
    context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
    /* VLAN MACLEN IPLEN */
    if (tx_flags & IGB_TX_FLAGS_VLAN)
        info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
    info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
    *hdr_len += skb_network_offset(skb);
    info |= skb_network_header_len(skb);
    *hdr_len += skb_network_header_len(skb);
    context_desc->vlan_macip_lens = cpu_to_le32(info);

    /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
    tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

    if (skb->protocol == htons(ETH_P_IP))
        tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
    tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

    context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

    /* MSS L4LEN IDX */
    mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
    mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

    /* For 82575, context index must be unique per
ring. */
    if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
        mss_l4len_idx |= tx_ring->reg_idx << 4;

    context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
    context_desc->seqnum_seed = 0;

    buffer_info->time_stamp = jiffies;
    buffer_info->next_to_watch = i;
    buffer_info->dma = 0;
    i++;
    if (i == tx_ring->count)
        i = 0;

    tx_ring->next_to_use = i;

    return true;
}

/* Build a checksum-offload context descriptor when needed.  Returns true
 * when a context descriptor was consumed, false otherwise. */
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                   struct sk_buff *skb, u32 tx_flags)
{
    struct e1000_adv_tx_context_desc *context_desc;
    struct device *dev = tx_ring->dev;
    struct igb_buffer *buffer_info;
    u32 info = 0, tu_cmd = 0;
    unsigned int i;

    if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
        (tx_flags & IGB_TX_FLAGS_VLAN)) {
        i = tx_ring->next_to_use;
        buffer_info = &tx_ring->buffer_info[i];
        context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

        if (tx_flags & IGB_TX_FLAGS_VLAN)
            info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

        info |= (skb_network_offset(skb) <<
                 E1000_ADVTXD_MACLEN_SHIFT);
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            info |= skb_network_header_len(skb);

        context_desc->vlan_macip_lens = cpu_to_le32(info);

        tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
            __be16 protocol;

            /* for 802.1Q frames the real L3 protocol is inside
             * the VLAN header */
            if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
                const struct vlan_ethhdr *vhdr =
                    (const struct vlan_ethhdr*)skb->data;

                protocol = vhdr->h_vlan_encapsulated_proto;
            } else {
                protocol = skb->protocol;
            }

            switch (protocol) {
            case cpu_to_be16(ETH_P_IP):
                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                    tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
                else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
                    tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                break;
            case cpu_to_be16(ETH_P_IPV6):
                /* XXX what about other V6 headers??
*/
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                    tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
                    tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
                break;
            default:
                /* NOTE(review): %x with a __be16 skb->protocol relies on
                 * varargs promotion; sparse would flag this — confirm */
                if (unlikely(net_ratelimit()))
                    dev_warn(dev,
                        "partial checksum but proto=%x!\n",
                        skb->protocol);
                break;
            }
        }

        context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
        context_desc->seqnum_seed = 0;
        if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
            context_desc->mss_l4len_idx =
                cpu_to_le32(tx_ring->reg_idx << 4);

        buffer_info->time_stamp = jiffies;
        buffer_info->next_to_watch = i;
        buffer_info->dma = 0;

        i++;
        if (i == tx_ring->count)
            i = 0;
        tx_ring->next_to_use = i;

        return true;
    }
    return false;
}

#define IGB_MAX_TXD_PWR 16
#define IGB_MAX_DATA_PER_TXD    (1<<IGB_MAX_TXD_PWR)

/* DMA-map skb's head and fragments into consecutive ring slots starting at
 * next_to_use.  Returns the number of descriptors used (>= 1), or 0 on a
 * mapping error after unwinding all partial mappings. */
static inline int igb_tx_map_adv(struct igb_ring *tx_ring,
                                 struct sk_buff *skb,
                                 unsigned int first)
{
    struct igb_buffer *buffer_info;
    struct device *dev = tx_ring->dev;
    unsigned int hlen = skb_headlen(skb);
    unsigned int count = 0, i;
    unsigned int f;
    u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

    i = tx_ring->next_to_use;

    buffer_info = &tx_ring->buffer_info[i];
    BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
    buffer_info->length = hlen;
    /* set time_stamp *before* dma to help avoid a possible race */
    buffer_info->time_stamp = jiffies;
    buffer_info->next_to_watch = i;
    buffer_info->dma = dma_map_single(dev, skb->data, hlen,
                                      DMA_TO_DEVICE);
    if (dma_mapping_error(dev, buffer_info->dma))
        goto dma_error;

    for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
        unsigned int len = frag->size;

        count++;
        i++;
        if (i == tx_ring->count)
            i = 0;

        buffer_info = &tx_ring->buffer_info[i];
        BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
        buffer_info->length = len;
        buffer_info->time_stamp = jiffies;
        buffer_info->next_to_watch = i;
        buffer_info->mapped_as_page = true;
        buffer_info->dma = dma_map_page(dev,
                                        frag->page,
                                        frag->page_offset,
                                        len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buffer_info->dma))
            goto dma_error;

    }
    /* the last slot carries the skb and per-packet accounting */
    tx_ring->buffer_info[i].skb = skb;
    tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
    /* multiply data chunks by size of headers */
    tx_ring->buffer_info[i].bytecount =
        ((gso_segs - 1) * hlen) + skb->len;
    tx_ring->buffer_info[i].gso_segs = gso_segs;
    tx_ring->buffer_info[first].next_to_watch = i;

    return ++count;

dma_error:
    dev_err(dev, "TX DMA map failed\n");

    /* clear timestamp and dma mappings for failed buffer_info mapping */
    buffer_info->dma = 0;
    buffer_info->time_stamp = 0;
    buffer_info->length = 0;
    buffer_info->next_to_watch = 0;
    buffer_info->mapped_as_page = false;

    /* clear timestamp and dma mappings for remaining portion of packet */
    while (count--) {
        if (i == 0)
            i = tx_ring->count;
        i--;
        buffer_info = &tx_ring->buffer_info[i];
        igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
    }

    return 0;
}

/* Write the advanced Tx data descriptors for an already-mapped packet and
 * bump the hardware tail to start transmission. */
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    u32 tx_flags, int count,
                                    u32 paylen, u8 hdr_len)
{
    union e1000_adv_tx_desc *tx_desc;
    struct igb_buffer *buffer_info;
    u32 olinfo_status = 0, cmd_type_len;
    unsigned int i = tx_ring->next_to_use;

    cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
                    E1000_ADVTXD_DCMD_DEXT);

    if (tx_flags & IGB_TX_FLAGS_VLAN)
        cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

    if (tx_flags & IGB_TX_FLAGS_TSTAMP)
        cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

    if (tx_flags & IGB_TX_FLAGS_TSO) {
        cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

        /* insert tcp checksum */
        olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

        /* insert ip checksum */
        if (tx_flags & IGB_TX_FLAGS_IPV4)
            olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

    } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
        olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
    }

    if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
        (tx_flags & (IGB_TX_FLAGS_CSUM |
                     IGB_TX_FLAGS_TSO |
                     IGB_TX_FLAGS_VLAN)))
        olinfo_status |= tx_ring->reg_idx << 4;

    olinfo_status |= ((paylen - hdr_len) <<
                      E1000_ADVTXD_PAYLEN_SHIFT);

    do {
        buffer_info = &tx_ring->buffer_info[i];
        tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
        tx_desc->read.buffer_addr =
cpu_to_le64(buffer_info->dma);
        tx_desc->read.cmd_type_len =
            cpu_to_le32(cmd_type_len | buffer_info->length);
        tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
        count--;
        i++;
        if (i == tx_ring->count)
            i = 0;
    } while (count > 0);

    /* tx_desc still points at the last descriptor: mark end-of-packet */
    tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
    /* Force memory writes to complete before letting h/w
     * know there are new descriptors to fetch.  (Only
     * applicable for weak-ordered memory model archs,
     * such as IA-64). */
    wmb();

    tx_ring->next_to_use = i;
    writel(i, tx_ring->tail);
    /* we need this if more than one processor can write to our tail
     * at a time, it syncronizes IO on IA64/Altix systems */
    mmiowb();
}

/* Slow path of igb_maybe_stop_tx: stop the subqueue, re-check for room
 * (another CPU may have freed descriptors), and either stay stopped
 * (-EBUSY) or wake the queue again (0). */
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
    struct net_device *netdev = tx_ring->netdev;

    netif_stop_subqueue(netdev, tx_ring->queue_index);

    /* Herbert's original patch had:
     *  smp_mb__after_netif_stop_queue();
     * but since that doesn't exist yet, just open code it. */
    smp_mb();

    /* We need to check again in a case another CPU has just
     * made room available. */
    if (igb_desc_unused(tx_ring) < size)
        return -EBUSY;

    /* A reprieve!
*/
    netif_wake_subqueue(netdev, tx_ring->queue_index);

    u64_stats_update_begin(&tx_ring->tx_syncp2);
    tx_ring->tx_stats.restart_queue2++;
    u64_stats_update_end(&tx_ring->tx_syncp2);

    return 0;
}

/* Fast-path room check; falls through to the stop/re-check slow path
 * only when fewer than 'size' descriptors are free. */
static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
    if (igb_desc_unused(tx_ring) >= size)
        return 0;
    return __igb_maybe_stop_tx(tx_ring, size);
}

/* Transmit one skb on the given ring: reserve descriptors, build any
 * TSO/checksum context, DMA-map, and queue the data descriptors. */
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                                    struct igb_ring *tx_ring)
{
    int tso = 0, count;
    u32 tx_flags = 0;
    u16 first;
    u8 hdr_len = 0;

    /* need: 1 descriptor per page,
     *       + 2 desc gap to keep tail from touching head,
     *       + 1 desc for skb->data,
     *       + 1 desc for context descriptor,
     * otherwise try next time */
    if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
        /* this is a hard error */
        return NETDEV_TX_BUSY;
    }

    if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        tx_flags |= IGB_TX_FLAGS_TSTAMP;
    }

    if (vlan_tx_tag_present(skb)) {
        tx_flags |= IGB_TX_FLAGS_VLAN;
        tx_flags |= (vlan_tx_tag_get(skb) <<
                     IGB_TX_FLAGS_VLAN_SHIFT);
    }

    if (skb->protocol == htons(ETH_P_IP))
        tx_flags |= IGB_TX_FLAGS_IPV4;

    first = tx_ring->next_to_use;
    if (skb_is_gso(skb)) {
        tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

        if (tso < 0) {
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
        }
    }

    if (tso)
        tx_flags |= IGB_TX_FLAGS_TSO;
    else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
             (skb->ip_summed == CHECKSUM_PARTIAL))
        tx_flags |= IGB_TX_FLAGS_CSUM;

    /*
     * count reflects descriptors mapped, if 0 or less then mapping error
     * has occurred and we need to rewind the descriptor queue
     */
    count = igb_tx_map_adv(tx_ring, skb, first);
    if (!count) {
        dev_kfree_skb_any(skb);
        tx_ring->buffer_info[first].time_stamp = 0;
        tx_ring->next_to_use = first;
        return NETDEV_TX_OK;
    }

    igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

    /* Make sure there is space in the ring for the next send.
*/
    igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

    return NETDEV_TX_OK;
}

/* ndo_start_xmit entry point: pick a Tx ring from the skb's queue mapping
 * and hand the packet to igb_xmit_frame_ring_adv. */
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *netdev)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct igb_ring *tx_ring;
    int r_idx = 0;

    if (test_bit(__IGB_DOWN, &adapter->state)) {
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }

    if (skb->len <= 0) {
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }

    r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
    tx_ring = adapter->multi_tx_table[r_idx];

    /* This goes back to the question of how to logically map a tx queue
     * to a flow.  Right now, performance is impacted slightly negatively
     * if using multiple tx queues.  If the stack breaks away from a
     * single qdisc implementation, we can look at this again. */
    return igb_xmit_frame_ring_adv(skb, tx_ring);
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;

    /* Do the reset outside of interrupt context */
    adapter->tx_timeout_count++;

    if (hw->mac.type == e1000_82580)
        hw->dev_spec._82575.global_device_reset = true;

    schedule_work(&adapter->reset_task);
    wr32(E1000_EICS,
         (adapter->eims_enable_mask & ~adapter->eims_other));
}

/* Deferred adapter reset (scheduled from timeout/watchdog paths). */
static void igb_reset_task(struct work_struct *work)
{
    struct igb_adapter *adapter;
    adapter = container_of(work, struct igb_adapter, reset_task);

    igb_dump(adapter);
    netdev_err(adapter->netdev, "Reset adapter\n");
    igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
                                                 struct rtnl_link_stats64 *stats)
{
    struct igb_adapter *adapter = netdev_priv(netdev);

    spin_lock(&adapter->stats64_lock);
    igb_update_stats(adapter, &adapter->stats64);
    memcpy(stats, &adapter->stats64,
	       sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	/* frame on the wire = payload + L2 header + FCS */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	/* 68 is the minimum IPv4 MTU; reject anything below it */
	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	/* wait for any in-flight reset to finish before we reconfigure */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	/* 82580 prepends a timestamp header to received frames */
	if (adapter->hw.mac.type == e1000_82580)
		max_frame += IGB_TS_HDR_LEN;

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;

	if ((adapter->hw.mac.type == e1000_82580) &&
	    (rx_buffer_len == IGB_RXBUFFER_128))
		rx_buffer_len += IGB_RXBUFFER_64;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * NOTE(review): igb_get_stats64() calls this under adapter->stats64_lock;
 * presumably all callers must hold that lock — confirm at other call sites.
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	/* aggregate per-ring rx counters using the u64 seqcount protocol */
	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		/* RQDPC is a 12-bit per-queue dropped-packet counter */
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];
		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;

		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	/* same aggregation for the tx rings */
	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers (hardware counters are clear-on-read,
	 * hence the += accumulation into the software copies) */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

/*
 * igb_msix_other - MSI-X "other cause" interrupt handler: services
 * non-queue events (device reset request, DMA out-of-sync, VF mailbox,
 * link status change) and re-arms the other-cause interrupt.
 */
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* re-enable the causes this handler owns; include VMMB only when
	 * VFs are allocated */
	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/*
 * igb_write_itr - push the interrupt throttle rate computed for this
 * q_vector to its EITR register, encoding it per MAC type (82575 uses
 * a mirrored 16-bit layout, later MACs use a "write enable" bit).
 */
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

/*
 * igb_msix_ring - per-queue MSI-X handler: updates ITR and schedules
 * NAPI for the q_vector that raised the interrupt.
 */
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
/*
 * igb_update_dca - retarget Direct Cache Access for this q_vector's
 * rings at the CPU currently running the handler.  No-op when the
 * vector is already bound to this CPU.
 */
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		/* the CPU-ID field layout differs between 82575 and later */
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

/*
 * igb_setup_dca - enable DCA mode globally and force a retarget of
 * every q_vector (cpu = -1 guarantees igb_update_dca rewrites it).
 */
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

/*
 * __igb_notify_dca - per-device body of the DCA provider notifier:
 * enables/disables DCA on this adapter as providers come and go.
 */
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

/*
 * igb_notify_dca - DCA notifier entry point; fans the event out to
 * every device bound to this driver.
 */
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

/*
 * igb_ping_all_vfs - send a PF control message to every VF mailbox,
 * adding the CTS flag for VFs that are clear-to-send.
 */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

/*
 * igb_set_vf_promisc - handle a VF request to change its promiscuous
 * settings; only multicast promiscuous is honored, anything else in
 * the message info field is rejected with -EINVAL.
 */
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

/*
 * igb_set_vf_multicasts - record (up to 30 of) the multicast hashes a
 * VF requested and rebuild the rx filters via igb_set_rx_mode().
 */
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

/*
 * igb_restore_vf_multicasts - re-program VMOLR and the MTA for every
 * VF after a PF multicast-list change wiped the shared table.
 */
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

/*
 * igb_clear_vf_vfta - remove a VF from every VLVF pool entry, dropping
 * entries (and their VFTA bits) whose pools become empty.
 */
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			reg = 0;
			/* NOTE(review): vid is extracted AFTER reg has been
			 * zeroed, so it is always 0 here and the original
			 * VLAN's VFTA bit is never cleared — this looks like
			 * a latent bug; compare with later upstream rewrites
			 * of this function before changing. */
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

/*
 * igb_vlvf_set - add or remove pool membership for (vid, vf) in the
 * shared VLVF table, maintaining the VFTA and the per-VF RLPML
 * (max-frame) adjustment for the 4-byte VLAN tag.  Returns 0 on
 * success, -1 when the table is unsupported/unavailable.
 */
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID
			 * entry that was enabled.  Search for a free filter
			 * entry, i.e. one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			/* first VLAN for this VF: grow its max frame size
			 * by 4 bytes to make room for the VLAN tag */
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			/* last VLAN removed: shrink max frame size again */
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

/*
 * igb_set_vmvir - program (or clear) the default VLAN insertion
 * register for a VF; a non-zero vid enables default-VLAN tagging.
 */
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

/*
 * igb_ndo_set_vf_vlan - ndo callback: administratively assign (or
 * clear, when vlan and qos are both 0) a port VLAN + QoS for a VF.
 */
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* 4095 is the max 12-bit VLAN id, 7 the max 3-bit priority */
	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		/* clearing: remove the old admin VLAN and reset defaults */
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
       }
out:
       return err;
}

/*
 * igb_set_vf_vlan - handle a VF mailbox VLAN add/remove request by
 * decoding the add flag and vid from the message.
 */
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

/*
 * igb_vf_reset - restore a VF's software/hardware state to defaults:
 * flags, offloads, VLANs, and multicast filters.
 */
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		/* NOTE(review): vfta was already cleared above, so this
		 * else-branch call appears redundant — confirm no side
		 * effect is relied upon before simplifying. */
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

/*
 * igb_vf_reset_event - handle an unsolicited VF reset: generate a new
 * random MAC (unless the PF pinned one) and reset VF state.
 */
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

/*
 * igb_vf_reset_msg - handle an explicit VF_RESET mailbox message:
 * reset state, program the VF MAC into a receive-address register,
 * enable tx/rx for the VF, and ACK with the MAC address.
 */
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	/* RAR entries are handed out from the top of the table, one per VF */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

/*
 * igb_set_vf_mac_addr - validate and apply a MAC address a VF asked
 * for via the mailbox.  Returns 0 on success, -1 on an invalid MAC.
 */
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	/* NOTE(review): the (char *) cast assigned to unsigned char *
	 * triggers a sign-mismatch warning on most compilers; probably
	 * meant (unsigned char *). */
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

/*
 * igb_rcv_ack_from_vf - a VF read our message before completing reset;
 * NACK it (rate-limited to once per 2s) so it restarts initialization.
 */
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

/*
 * igb_rcv_msg_from_vf - read and dispatch one mailbox message from a
 * VF, then ACK or NACK it according to the handler's result.
 */
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		/* VF MAC changes are refused when the PF pinned the MAC */
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		/* likewise, VLAN changes are refused under an admin VLAN */
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

/*
 * igb_msg_task - poll every VF mailbox for pending resets, messages,
 * and acks; called from the other-cause interrupt path.
 */
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf <
	     adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/*
 * igb_ring_irq_enable - end-of-poll helper: recompute ITR when dynamic
 * moderation is enabled, then re-arm the vector's interrupt (EIMS in
 * MSI-X mode, the legacy mask otherwise) unless the adapter is down.
 */
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector, napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	/* force another poll round when tx cleanup didn't finish */
	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer: pointer to igb_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	/* walk completed frames: each iteration of the outer loop reclaims
	 * one frame's descriptors up to and including its end-of-packet */
	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (buffer_info->skb) {
				total_bytes += buffer_info->bytecount;
				/* gso_segs is currently only valid for tcp */
				total_packets += buffer_info->gso_segs;
				igb_tx_hwtstamp(q_vector, buffer_info);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}
tx_ring->next_to_clean = i; if (unlikely(count && netif_carrier_ok(netdev) && igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !(test_bit(__IGB_DOWN, &adapter->state))) { netif_wake_subqueue(netdev, tx_ring->queue_index); u64_stats_update_begin(&tx_ring->tx_syncp); tx_ring->tx_stats.restart_queue++; u64_stats_update_end(&tx_ring->tx_syncp); } } if (tx_ring->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ tx_ring->detect_tx_hung = false; if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + (adapter->tx_timeout_factor * HZ)) && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ dev_err(tx_ring->dev, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH <%x>\n" " TDT <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " next_to_watch <%x>\n" " jiffies <%lx>\n" " desc.status <%x>\n", tx_ring->queue_index, readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, eop, jiffies, eop_desc->wb.status); netif_stop_subqueue(netdev, tx_ring->queue_index); } } tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; u64_stats_update_begin(&tx_ring->tx_syncp); tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.packets += total_packets; u64_stats_update_end(&tx_ring->tx_syncp); return count < tx_ring->count; } /** * igb_receive_skb - helper function to handle rx indications * @q_vector: structure containing interrupt and ring information * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ static void igb_receive_skb(struct igb_q_vector *q_vector, struct sk_buff *skb, u16 vlan_tag) { struct igb_adapter 
*adapter = q_vector->adapter; if (vlan_tag && adapter->vlgrp) vlan_gro_receive(&q_vector->napi, adapter->vlgrp, vlan_tag, skb); else napi_gro_receive(&q_vector->napi, skb); } static inline void igb_rx_checksum_adv(struct igb_ring *ring, u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Ignore Checksum bit is set or checksum is disabled through ethtool */ if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || (status_err & E1000_RXD_STAT_IXSM)) return; /* TCP/UDP checksum error bit is set */ if (status_err & (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { /* * work around errata with sctp packets where the TCPE aka * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) * packets, (aka let the stack check the crc32c) */ if ((skb->len == 60) && (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) { u64_stats_update_begin(&ring->rx_syncp); ring->rx_stats.csum_err++; u64_stats_update_end(&ring->rx_syncp); } /* let the stack verify checksum errors */ return; } /* It must be a TCP or UDP packet with a valid checksum */ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) skb->ip_summed = CHECKSUM_UNNECESSARY; dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err); } static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u64 regval; /* * If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so * read the registers to make them available again. Because only one * packet can be time stamped at a time, we know that the register * values must belong to this one here and therefore we don't need to * compare any of the additional attributes stored for it. * * If nothing went wrong, then it should have a shared tx_flags that we * can turn into a skb_shared_hwtstamps. 
*/ if (staterr & E1000_RXDADV_STAT_TSIP) { u32 *stamp = (u32 *)skb->data; regval = le32_to_cpu(*(stamp + 2)); regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; skb_pull(skb, IGB_TS_HDR_LEN); } else { if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) return; regval = rd32(E1000_RXSTMPL); regval |= (u64)rd32(E1000_RXSTMPH) << 32; } igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } static inline u16 igb_get_hlen(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc) { /* HW will not DMA in data larger than the given buffer, even if it * parses the (NFS, of course) header to be larger. In that case, it * fills the header buffer and spills the rest into the page. */ u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > rx_ring->rx_buffer_len) hlen = rx_ring->rx_buffer_len; return hlen; } static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int *work_done, int budget) { struct igb_ring *rx_ring = q_vector->rx_ring; struct net_device *netdev = rx_ring->netdev; struct device *dev = rx_ring->dev; union e1000_adv_rx_desc *rx_desc , *next_rxd; struct igb_buffer *buffer_info , *next_buffer; struct sk_buff *skb; bool cleaned = false; int cleaned_count = 0; int current_node = numa_node_id(); unsigned int total_bytes = 0, total_packets = 0; unsigned int i; u32 staterr; u16 length; u16 vlan_tag; i = rx_ring->next_to_clean; buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); while (staterr & E1000_RXD_STAT_DD) { if (*work_done >= budget) break; (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); buffer_info->skb = NULL; i++; if (i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; length = 
le16_to_cpu(rx_desc->wb.upper.length); cleaned = true; cleaned_count++; if (buffer_info->dma) { dma_unmap_single(dev, buffer_info->dma, rx_ring->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { skb_put(skb, length); goto send_up; } skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); } if (length) { dma_unmap_page(dev, buffer_info->page_dma, PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, buffer_info->page, buffer_info->page_offset, length); if ((page_count(buffer_info->page) != 1) || (page_to_nid(buffer_info->page) != current_node)) buffer_info->page = NULL; else get_page(buffer_info->page); skb->len += length; skb->data_len += length; skb->truesize += length; } if (!(staterr & E1000_RXD_STAT_EOP)) { buffer_info->skb = next_buffer->skb; buffer_info->dma = next_buffer->dma; next_buffer->skb = skb; next_buffer->dma = 0; goto next_desc; } send_up: if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { dev_kfree_skb_irq(skb); goto next_desc; } if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS)) igb_rx_hwtstamp(q_vector, staterr, skb); total_bytes += skb->len; total_packets++; igb_rx_checksum_adv(rx_ring, staterr, skb); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, rx_ring->queue_index); vlan_tag = ((staterr & E1000_RXD_STAT_VP) ? 
le16_to_cpu(rx_desc->wb.upper.vlan) : 0); igb_receive_skb(q_vector, skb, vlan_tag); next_desc: rx_desc->wb.upper.status_error = 0; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } rx_ring->next_to_clean = i; cleaned_count = igb_desc_unused(rx_ring); if (cleaned_count) igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); rx_ring->total_packets += total_packets; rx_ring->total_bytes += total_bytes; u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; u64_stats_update_end(&rx_ring->rx_syncp); return cleaned; } /** * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split * @adapter: address of board private structure **/ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) { struct net_device *netdev = rx_ring->netdev; union e1000_adv_rx_desc *rx_desc; struct igb_buffer *buffer_info; struct sk_buff *skb; unsigned int i; int bufsz; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; bufsz = rx_ring->rx_buffer_len; while (cleaned_count--) { rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { if (!buffer_info->page) { buffer_info->page = netdev_alloc_page(netdev); if (unlikely(!buffer_info->page)) { u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } buffer_info->page_offset = 0; } else { buffer_info->page_offset ^= PAGE_SIZE / 2; } buffer_info->page_dma = dma_map_page(rx_ring->dev, buffer_info->page, buffer_info->page_offset, PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, buffer_info->page_dma)) { buffer_info->page_dma = 0; 
u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } skb = buffer_info->skb; if (!skb) { skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (unlikely(!skb)) { u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } buffer_info->skb = skb; } if (!buffer_info->dma) { buffer_info->dma = dma_map_single(rx_ring->dev, skb->data, bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, buffer_info->dma)) { buffer_info->dma = 0; u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. */ if (bufsz < IGB_RXBUFFER_1024) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } i++; if (i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; if (i == 0) i = (rx_ring->count - 1); else i--; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); writel(i, rx_ring->tail); } } /** * igb_mii_ioctl - * @netdev: * @ifreq: * @cmd: **/ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); if (adapter->hw.phy.media_type != e1000_media_type_copper) return -EOPNOTSUPP; switch (cmd) { case SIOCGMIIPHY: data->phy_id = adapter->hw.phy.addr; break; case SIOCGMIIREG: if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, &data->val_out)) return -EIO; break; case SIOCSMIIREG: default: return -EOPNOTSUPP; } return 0; } /** * igb_hwtstamp_ioctl - control hardware time stamping * @netdev: * @ifreq: * @cmd: * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't case any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware * filters. Not all combinations are supported, in particular event * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". 
* **/ static int igb_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct hwtstamp_config config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 tsync_rx_cfg = 0; bool is_l4 = false; bool is_l2 = false; u32 regval; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; /* reserved for future extensions */ if (config.flags) return -EINVAL; switch (config.tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; case HWTSTAMP_TX_ON: break; default: return -ERANGE; } switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_ALL: /* * register TSYNCRXCFG must be set, therefore it is not * possible to time stamp both Sync and Delay_Req messages * => fall back to time stamping all packets */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; config.rx_filter = HWTSTAMP_FILTER_ALL; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; is_l2 = true; is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; is_l2 = true; is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case 
HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; break; default: return -ERANGE; } if (hw->mac.type == e1000_82575) { if (tsync_rx_ctl | tsync_tx_ctl) return -EINVAL; return 0; } /* * Per-packet timestamping only works if all packets are * timestamped, so enable timestamping in all packets as * long as one rx filter was configured. */ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; } /* enable/disable TX */ regval = rd32(E1000_TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; regval |= tsync_tx_ctl; wr32(E1000_TSYNCTXCTL, regval); /* enable/disable RX */ regval = rd32(E1000_TSYNCRXCTL); regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); regval |= tsync_rx_ctl; wr32(E1000_TSYNCRXCTL, regval); /* define which PTP packets are time stamped */ wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); /* define ethertype filter for timestamped packets */ if (is_l2) wr32(E1000_ETQF(3), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else wr32(E1000_ETQF(3), 0); #define PTP_PORT 319 /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { u32 ftqf = (IPPROTO_UDP /* UDP */ | E1000_FTQF_VF_BP /* VF not compared */ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ | E1000_FTQF_MASK); /* mask all inputs */ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ wr32(E1000_IMIR(3), htons(PTP_PORT)); wr32(E1000_IMIREXT(3), (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); if (hw->mac.type == e1000_82576) { /* enable source port check */ wr32(E1000_SPQF(3), htons(PTP_PORT)); ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; } wr32(E1000_FTQF(3), ftqf); } else { wr32(E1000_FTQF(3), E1000_FTQF_MASK); } wrfl(); adapter->hwtstamp_config = config; /* clear TX/RX 
time stamp registers, just to be sure */ regval = rd32(E1000_TXSTMPH); regval = rd32(E1000_RXSTMPH); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } /** * igb_ioctl - * @netdev: * @ifreq: * @cmd: **/ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); case SIOCSHWTSTAMP: return igb_hwtstamp_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_read_config_word(adapter->pdev, cap_offset + reg, value); return 0; } s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_write_config_word(adapter->pdev, cap_offset + reg, *value); return 0; } static void igb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl; igb_irq_disable(adapter); adapter->vlgrp = grp; if (grp) { /* enable VLAN tag insert/strip */ ctrl = rd32(E1000_CTRL); ctrl |= E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); /* Disable CFI check */ rctl = rd32(E1000_RCTL); rctl &= ~E1000_RCTL_CFIEN; wr32(E1000_RCTL, rctl); } else { /* disable VLAN tag insert/strip */ ctrl = rd32(E1000_CTRL); ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); } igb_rlpml_set(adapter); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); } static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int pf_id = 
adapter->vfs_allocated_count; /* attempt to add filter to vlvf array */ igb_vlvf_set(adapter, vid, true, pf_id); /* add the filter since PF can receive vlans w/o entry in vlvf */ igb_vfta_set(hw, vid, true); } static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; s32 err; igb_irq_disable(adapter); vlan_group_set_device(adapter->vlgrp, vid, NULL); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); /* remove vlan from VLVF table array */ err = igb_vlvf_set(adapter, vid, false, pf_id); /* if vid was not present in VLVF just remove it from table */ if (err) igb_vfta_set(hw, vid, false); } static void igb_restore_vlan(struct igb_adapter *adapter) { igb_vlan_rx_register(adapter->netdev, adapter->vlgrp); if (adapter->vlgrp) { u16 vid; for (vid = 0; vid < VLAN_N_VID; vid++) { if (!vlan_group_get_device(adapter->vlgrp, vid)) continue; igb_vlan_rx_add_vid(adapter->netdev, vid); } } } int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) { struct pci_dev *pdev = adapter->pdev; struct e1000_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; /* Make sure dplx is at most 1 bit and lsb of speed is not set * for the switch() below to work */ if ((spd & 1) || (dplx & ~1)) goto err_inval; /* Fiber NIC's only allow 1000 Gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && spd != SPEED_1000 && dplx != DUPLEX_FULL) goto err_inval; switch (spd + dplx) { case SPEED_10 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_10_HALF; break; case SPEED_10 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_10_FULL; break; case SPEED_100 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_100_HALF; break; case SPEED_100 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = 
ADVERTISE_1000_FULL; break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: goto err_inval; } return 0; err_inval: dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); if (netif_running(netdev)) igb_close(netdev); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; if (wufc) { igb_setup_rctl(adapter); igb_set_rx_mode(netdev); /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { rctl = rd32(E1000_RCTL); rctl |= E1000_RCTL_MPE; wr32(E1000_RCTL, rctl); } ctrl = rd32(E1000_CTRL); /* advertise wake from D3Cold */ #define E1000_CTRL_ADVD3WUC 0x00100000 /* phy power management enable */ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); /* Allow time for pending master requests to run */ igb_disable_pcie_master(hw); wr32(E1000_WUC, E1000_WUC_PME_EN); wr32(E1000_WUFC, wufc); } else { wr32(E1000_WUC, 0); wr32(E1000_WUFC, 0); } *enable_wake = wufc || adapter->en_mng_pt; if (!*enable_wake) igb_power_down_link(adapter); else igb_power_up_link(adapter); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ igb_release_hw_control(adapter); pci_disable_device(pdev); return 0; } #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *pdev, pm_message_t state) { int retval; bool wake; retval = __igb_shutdown(pdev, &wake); if (retval) return retval; if (wake) { pci_prepare_to_sleep(pdev); } else { pci_wake_from_d3(pdev, false); pci_set_power_state(pdev, PCI_D3hot); } return 0; } static int igb_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "igb: Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } igb_reset(adapter); /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); wr32(E1000_WUS, ~0); if (netif_running(netdev)) { err = igb_open(netdev); if (err) return err; } netif_device_attach(netdev); return 0; } #endif static void igb_shutdown(struct pci_dev *pdev) { bool wake; __igb_shutdown(pdev, &wake); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); pci_set_power_state(pdev, PCI_D3hot); } } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. 
*/ static void igb_netpoll(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int i; if (!adapter->msix_entries) { struct igb_q_vector *q_vector = adapter->q_vector[0]; igb_irq_disable(adapter); napi_schedule(&q_vector->napi); return; } for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; wr32(E1000_EIMC, q_vector->eims_value); napi_schedule(&q_vector->napi); } } #endif /* CONFIG_NET_POLL_CONTROLLER */ /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) igb_down(adapter); pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * igb_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the igb_resume routine. 
*/ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pci_ers_result_t result; int err; if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); igb_reset(adapter); wr32(E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err) { dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " "failed 0x%0x\n", err); /* non-fatal, continue */ } return result; } /** * igb_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the igb_resume routine. */ static void igb_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); return; } } netif_device_attach(netdev); /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); } static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, u8 qsel) { u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* Indicate to hardware the Address is Valid. 
*/ rar_high |= E1000_RAH_AV; if (hw->mac.type == e1000_82575) rar_high |= E1000_RAH_POOL_1 * qsel; else rar_high |= E1000_RAH_POOL_1 << qsel; wr32(E1000_RAL(index), rar_low); wrfl(); wr32(E1000_RAH(index), rar_high); wrfl(); } static int igb_set_vf_mac(struct igb_adapter *adapter, int vf, unsigned char *mac_addr) { struct e1000_hw *hw = &adapter->hw; /* VF MAC addresses start at end of receive addresses and moves * torwards the first, as a result a collision should not be possible */ int rar_entry = hw->mac.rar_entry_count - (vf + 1); memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); return 0; } static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) return -EINVAL; adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" " change effective."); if (test_bit(__IGB_DOWN, &adapter->state)) { dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," " but the PF device is not up.\n"); dev_warn(&adapter->pdev->dev, "Bring the PF device up before" " attempting to use the VF device.\n"); } return igb_set_vf_mac(adapter, vf, mac); } static int igb_link_mbps(int internal_link_speed) { switch (internal_link_speed) { case SPEED_100: return 100; case SPEED_1000: return 1000; default: return 0; } } static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, int link_speed) { int rf_dec, rf_int; u32 bcnrc_val; if (tx_rate != 0) { /* Calculate the rate factor values to set */ rf_int = link_speed / tx_rate; rf_dec = (link_speed - (rf_int * tx_rate)); rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & 
E1000_RTTBCNRC_RF_INT_MASK); bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); } else { bcnrc_val = 0; } wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ wr32(E1000_RTTBCNRC, bcnrc_val); } static void igb_check_vf_rate_limit(struct igb_adapter *adapter) { int actual_link_speed, i; bool reset_rate = false; /* VF TX rate limit was not set or not supported */ if ((adapter->vf_rate_link_speed == 0) || (adapter->hw.mac.type != e1000_82576)) return; actual_link_speed = igb_link_mbps(adapter->link_speed); if (actual_link_speed != adapter->vf_rate_link_speed) { reset_rate = true; adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, "Link speed has been changed. VF Transmit " "rate is disabled\n"); } for (i = 0; i < adapter->vfs_allocated_count; i++) { if (reset_rate) adapter->vf_data[i].tx_rate = 0; igb_set_vf_rate_limit(&adapter->hw, i, adapter->vf_data[i].tx_rate, actual_link_speed); } } static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int actual_link_speed; if (hw->mac.type != e1000_82576) return -EOPNOTSUPP; actual_link_speed = igb_link_mbps(adapter->link_speed); if ((vf >= adapter->vfs_allocated_count) || (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || (tx_rate < 0) || (tx_rate > actual_link_speed)) return -EINVAL; adapter->vf_rate_link_speed = actual_link_speed; adapter->vf_data[vf].tx_rate = (u16)tx_rate; igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); return 0; } static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct igb_adapter *adapter = netdev_priv(netdev); if (vf >= adapter->vfs_allocated_count) return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); ivi->tx_rate = adapter->vf_data[vf].tx_rate; ivi->vlan = adapter->vf_data[vf].pf_vlan; ivi->qos = adapter->vf_data[vf].pf_qos; return 0; } static void igb_vmm_control(struct igb_adapter 
*adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; switch (hw->mac.type) { case e1000_82575: default: /* replication is not supported for 82575 */ return; case e1000_82576: /* notify HW that the MAC is adding vlan tags */ reg = rd32(E1000_DTXCTL); reg |= E1000_DTXCTL_VLAN_ADDED; wr32(E1000_DTXCTL, reg); case e1000_82580: /* enable replication vlan tag stripping */ reg = rd32(E1000_RPLOLR); reg |= E1000_RPLOLR_STRVLAN; wr32(E1000_RPLOLR, reg); case e1000_i350: /* none of the above registers are supported by i350 */ break; } if (adapter->vfs_allocated_count) { igb_vmdq_set_loopback_pf(hw, true); igb_vmdq_set_replication_pf(hw, true); igb_vmdq_set_anti_spoofing_pf(hw, true, adapter->vfs_allocated_count); } else { igb_vmdq_set_loopback_pf(hw, false); igb_vmdq_set_replication_pf(hw, false); } } /* igb_main.c */
gpl-2.0
phalf/android_kernel_samsung_mint-vlx-all
drivers/net/igb/igb_main.c
1562
193896
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2009 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/netdevice.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/net_tstamp.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/if_ether.h> #include <linux/aer.h> #include <linux/prefetch.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif #include "igb.h" #define MAJ 3 #define MIN 0 #define BUILD 6 #define KFIX 2 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" __stringify(KFIX) char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, }; static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, { 
PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, igb_pci_tbl); void igb_reset(struct igb_adapter *); static int igb_setup_all_tx_resources(struct igb_adapter *); static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); static void igb_init_hw_timer(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_ring(int irq, void *); #ifdef CONFIG_IGB_DCA static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA 
*/ static bool igb_clean_tx_irq(struct igb_q_vector *); static int igb_poll(struct napi_struct *, int); static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); static void igb_vlan_rx_register(struct net_device *, struct vlan_group *); static void igb_vlan_rx_add_vid(struct net_device *, u16); static void igb_vlan_rx_kill_vid(struct net_device *, u16); static void igb_restore_vlan(struct igb_adapter *); static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static void igb_vmm_control(struct igb_adapter *); static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); #endif static void igb_shutdown(struct pci_dev *); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { .notifier_call = igb_notify_dca, .next = NULL, .priority = 0 }; #endif #ifdef CONFIG_NET_POLL_CONTROLLER /* for netdump / net console */ static void igb_netpoll(struct net_device *); #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs = 0; module_param(max_vfs, uint, 0); MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " 
"per physical function"); #endif /* CONFIG_PCI_IOV */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *, pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); static void igb_io_resume(struct pci_dev *); static struct pci_error_handlers igb_err_handler = { .error_detected = igb_io_error_detected, .slot_reset = igb_io_slot_reset, .resume = igb_io_resume, }; static struct pci_driver igb_driver = { .name = igb_driver_name, .id_table = igb_pci_tbl, .probe = igb_probe, .remove = __devexit_p(igb_remove), #ifdef CONFIG_PM /* Power Management Hooks */ .suspend = igb_suspend, .resume = igb_resume, #endif .shutdown = igb_shutdown, .err_handler = &igb_err_handler }; MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); struct igb_reg_info { u32 ofs; char *name; }; static const struct igb_reg_info igb_reg_info_tbl[] = { /* General Registers */ {E1000_CTRL, "CTRL"}, {E1000_STATUS, "STATUS"}, {E1000_CTRL_EXT, "CTRL_EXT"}, /* Interrupt Registers */ {E1000_ICR, "ICR"}, /* RX Registers */ {E1000_RCTL, "RCTL"}, {E1000_RDLEN(0), "RDLEN"}, {E1000_RDH(0), "RDH"}, {E1000_RDT(0), "RDT"}, {E1000_RXDCTL(0), "RXDCTL"}, {E1000_RDBAL(0), "RDBAL"}, {E1000_RDBAH(0), "RDBAH"}, /* TX Registers */ {E1000_TCTL, "TCTL"}, {E1000_TDBAL(0), "TDBAL"}, {E1000_TDBAH(0), "TDBAH"}, {E1000_TDLEN(0), "TDLEN"}, {E1000_TDH(0), "TDH"}, {E1000_TDT(0), "TDT"}, {E1000_TXDCTL(0), "TXDCTL"}, {E1000_TDFH, "TDFH"}, {E1000_TDFT, "TDFT"}, {E1000_TDFHS, "TDFHS"}, {E1000_TDFPC, "TDFPC"}, /* List Terminator */ {} }; /* * igb_regdump - register printout routine */ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) { int n = 0; char rname[16]; u32 regs[8]; switch (reginfo->ofs) { case E1000_RDLEN(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDLEN(n)); break; case E1000_RDH(0): for (n = 0; n < 4; n++) regs[n] = 
rd32(E1000_RDH(n)); break; case E1000_RDT(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDT(n)); break; case E1000_RXDCTL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RXDCTL(n)); break; case E1000_RDBAL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAL(n)); break; case E1000_RDBAH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAH(n)); break; case E1000_TDBAL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_RDBAL(n)); break; case E1000_TDBAH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDBAH(n)); break; case E1000_TDLEN(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDLEN(n)); break; case E1000_TDH(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDH(n)); break; case E1000_TDT(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TDT(n)); break; case E1000_TXDCTL(0): for (n = 0; n < 4; n++) regs[n] = rd32(E1000_TXDCTL(n)); break; default: printk(KERN_INFO "%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); printk(KERN_INFO "%-15s ", rname); for (n = 0; n < 4; n++) printk(KERN_CONT "%08x ", regs[n]); printk(KERN_CONT "\n"); } /* * igb_dump - Print registers, tx-rings and rx-rings */ static void igb_dump(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct igb_reg_info *reginfo; int n = 0; struct igb_ring *tx_ring; union e1000_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; struct igb_buffer *buffer_info; struct igb_ring *rx_ring; union e1000_adv_rx_desc *rx_desc; u32 staterr; int i = 0; if (!netif_msg_hw(adapter)) return; /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); printk(KERN_INFO "Device Name state " "trans_start last_rx\n"); printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); printk(KERN_INFO " Register Name Value\n"); for 
(reginfo = (struct igb_reg_info *)igb_reg_info_tbl; reginfo->name; reginfo++) { igb_regdump(hw, reginfo); } /* Print TX Ring Summary */ if (!netdev || !netif_running(netdev)) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" " leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp); } /* Print TX Rings */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); /* Transmit Descriptor Formats * * Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | * +--------------------------------------------------------------+ * 63 46 45 40 39 38 36 35 32 31 24 15 0 */ for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "T [desc] [address 63:0 ] " "[PlPOCIStDDM Ln] [bi->dma ] " "leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" " %04X %3X %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (u64)buffer_info->time_stamp, buffer_info->skb); if (i == 
tx_ring->next_to_use && i == tx_ring->next_to_clean) printk(KERN_CONT " NTC/U\n"); else if (i == tx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == tx_ring->next_to_clean) printk(KERN_CONT " NTC\n"); else printk(KERN_CONT "\n"); if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt(buffer_info->dma), buffer_info->length, true); } } /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); printk(KERN_INFO "Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; printk(KERN_INFO " %5d %5X %5X\n", n, rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ if (!netif_msg_rx_status(adapter)) goto exit; dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); /* Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| * +----------------------------------------------+------+ * 8 | Header Buffer Address [63:1] | DD | * +-----------------------------------------------------+ * * * Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 17 16 4 3 0 * +------------------------------------------------------+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | * | Checksum Ident | | | | Type | Type | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); printk(KERN_INFO "------------------------------------\n"); printk(KERN_INFO "R [desc] [ PktBuf A0] " "[ HeadBuf DD] [bi->dma ] [bi->skb] " "<-- Adv Rx Read format\n"); printk(KERN_INFO 
"RWB[desc] [PcsmIpSHl PtRs] " "[vl er S cks ln] ---------------- [bi->skb] " "<-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ printk(KERN_INFO "RWB[0x%03X] %016llX " "%016llX ---------------- %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), buffer_info->skb); } else { printk(KERN_INFO "R [0x%03X] %016llX " "%016llX %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, buffer_info->skb); if (netif_msg_pktdata(adapter)) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt(buffer_info->dma), rx_ring->rx_buffer_len, true); if (rx_ring->rx_buffer_len < IGB_RXBUFFER_1024) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt( buffer_info->page_dma + buffer_info->page_offset), PAGE_SIZE/2, true); } } if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) printk(KERN_CONT " NTC\n"); else printk(KERN_CONT "\n"); } } exit: return; } /** * igb_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t igb_read_clock(const struct cyclecounter *tc) { struct igb_adapter *adapter = container_of(tc, struct igb_adapter, cycles); struct e1000_hw *hw = &adapter->hw; u64 stamp = 0; int shift = 0; /* * The timestamp latches on lowest register read. For the 82580 * the lowest register is SYSTIMR instead of SYSTIML. However we never * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. 
*/ if (hw->mac.type == e1000_82580) { stamp = rd32(E1000_SYSTIMR) >> 8; shift = IGB_82580_TSYNC_SHIFT; } stamp |= (u64)rd32(E1000_SYSTIML) << shift; stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); return stamp; } /** * igb_get_hw_dev - return device * used by hardware layer to print debugging information **/ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) { struct igb_adapter *adapter = hw->back; return adapter->netdev; } /** * igb_init_module - Driver Registration Routine * * igb_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init igb_init_module(void) { int ret; printk(KERN_INFO "%s - version %s\n", igb_driver_string, igb_driver_version); printk(KERN_INFO "%s\n", igb_copyright); #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); #endif ret = pci_register_driver(&igb_driver); return ret; } module_init(igb_init_module); /** * igb_exit_module - Driver Exit Cleanup Routine * * igb_exit_module is called just before the driver is removed * from memory. **/ static void __exit igb_exit_module(void) { #ifdef CONFIG_IGB_DCA dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&igb_driver); } module_exit(igb_exit_module); #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) /** * igb_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize * * Once we know the feature-set enabled for the device, we'll cache * the register offset the descriptor ring is assigned to. **/ static void igb_cache_ring_register(struct igb_adapter *adapter) { int i = 0, j = 0; u32 rbase_offset = adapter->vfs_allocated_count; switch (adapter->hw.mac.type) { case e1000_82576: /* The queues are allocated for virtualization such that VF 0 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. 
* In order to avoid collision we start at the first free queue * and continue consuming queues in the same sequence */ if (adapter->vfs_allocated_count) { for (; i < adapter->rss_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); } case e1000_82575: case e1000_82580: case e1000_i350: default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; for (; j < adapter->num_tx_queues; j++) adapter->tx_ring[j]->reg_idx = rbase_offset + j; break; } } static void igb_free_queues(struct igb_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) { kfree(adapter->tx_ring[i]); adapter->tx_ring[i] = NULL; } for (i = 0; i < adapter->num_rx_queues; i++) { kfree(adapter->rx_ring[i]); adapter->rx_ring[i] = NULL; } adapter->num_rx_queues = 0; adapter->num_tx_queues = 0; } /** * igb_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. **/ static int igb_alloc_queues(struct igb_adapter *adapter) { struct igb_ring *ring; int i; for (i = 0; i < adapter->num_tx_queues; i++) { ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->tx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* For 82575, context index must be unique per ring. 
*/ if (adapter->hw.mac.type == e1000_82575) ring->flags = IGB_RING_FLAG_TX_CTX_IDX; adapter->tx_ring[i] = ring; } for (i = 0; i < adapter->num_rx_queues; i++) { ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->rx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ /* set flag indicating ring supports SCTP checksum offload */ if (adapter->hw.mac.type >= e1000_82576) ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; adapter->rx_ring[i] = ring; } igb_cache_ring_register(adapter); return 0; err: igb_free_queues(adapter); return -ENOMEM; } #define IGB_N0_QUEUE -1 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) { u32 msixbm = 0; struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u32 ivar, index; int rx_queue = IGB_N0_QUEUE; int tx_queue = IGB_N0_QUEUE; if (q_vector->rx_ring) rx_queue = q_vector->rx_ring->reg_idx; if (q_vector->tx_ring) tx_queue = q_vector->tx_ring->reg_idx; switch (hw->mac.type) { case e1000_82575: /* The 82575 assigns vectors using a bitmask, which matches the bitmask for the EICR/EIMS/EIMC registers. To assign one or more queues to a vector, we write the appropriate bits into the MSIXBM register for that vector. */ if (rx_queue > IGB_N0_QUEUE) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; if (!adapter->msix_entries && msix_vector == 0) msixbm |= E1000_EIMS_OTHER; array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); q_vector->eims_value = msixbm; break; case e1000_82576: /* 82576 uses a table-based method for assigning vectors. Each queue has a single entry in the table to which we write a vector number along with a "valid" bit. Sadly, the layout of the table is somewhat counterintuitive. 
*/ if (rx_queue > IGB_N0_QUEUE) { index = (rx_queue & 0x7); ivar = array_rd32(E1000_IVAR0, index); if (rx_queue < 8) { /* vector goes into low byte of register */ ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } else { /* vector goes into third byte of register */ ivar = ivar & 0xFF00FFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 16; } array_wr32(E1000_IVAR0, index, ivar); } if (tx_queue > IGB_N0_QUEUE) { index = (tx_queue & 0x7); ivar = array_rd32(E1000_IVAR0, index); if (tx_queue < 8) { /* vector goes into second byte of register */ ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } else { /* vector goes into high byte of register */ ivar = ivar & 0x00FFFFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 24; } array_wr32(E1000_IVAR0, index, ivar); } q_vector->eims_value = 1 << msix_vector; break; case e1000_82580: case e1000_i350: /* 82580 uses the same table-based approach as 82576 but has fewer entries as a result we carry over for queues greater than 4. 
*/ if (rx_queue > IGB_N0_QUEUE) { index = (rx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (rx_queue & 0x1) { /* vector goes into third byte of register */ ivar = ivar & 0xFF00FFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 16; } else { /* vector goes into low byte of register */ ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } array_wr32(E1000_IVAR0, index, ivar); } if (tx_queue > IGB_N0_QUEUE) { index = (tx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (tx_queue & 0x1) { /* vector goes into high byte of register */ ivar = ivar & 0x00FFFFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 24; } else { /* vector goes into second byte of register */ ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } array_wr32(E1000_IVAR0, index, ivar); } q_vector->eims_value = 1 << msix_vector; break; default: BUG(); break; } /* add q_vector eims value to global eims_enable_mask */ adapter->eims_enable_mask |= q_vector->eims_value; /* configure q_vector to set itr on first interrupt */ q_vector->set_itr = 1; } /** * igb_configure_msix - Configure MSI-X hardware * * igb_configure_msix sets up the hardware to properly * generate MSI-X interrupts. **/ static void igb_configure_msix(struct igb_adapter *adapter) { u32 tmp; int i, vector = 0; struct e1000_hw *hw = &adapter->hw; adapter->eims_enable_mask = 0; /* set vector for other causes, i.e. link changes */ switch (hw->mac.type) { case e1000_82575: tmp = rd32(E1000_CTRL_EXT); /* enable MSI-X PBA support*/ tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. */ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; wr32(E1000_CTRL_EXT, tmp); /* enable msix_other interrupt */ array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); adapter->eims_other = E1000_EIMS_OTHER; break; case e1000_82576: case e1000_82580: case e1000_i350: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. 
*/ wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_PBA | E1000_GPIE_EIAME | E1000_GPIE_NSICR); /* enable msix_other interrupt */ adapter->eims_other = 1 << vector; tmp = (vector++ | E1000_IVAR_VALID) << 8; wr32(E1000_IVAR_MISC, tmp); break; default: /* do nothing, since nothing else supports MSI-X */ break; } /* switch (hw->mac.type) */ adapter->eims_enable_mask |= adapter->eims_other; for (i = 0; i < adapter->num_q_vectors; i++) igb_assign_vector(adapter->q_vector[i], vector++); wrfl(); } /** * igb_request_msix - Initialize MSI-X interrupts * * igb_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. **/ static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; int i, err = 0, vector = 0; err = request_irq(adapter->msix_entries[vector].vector, igb_msix_other, 0, netdev->name, adapter); if (err) goto out; vector++; for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); if (q_vector->rx_ring && q_vector->tx_ring) sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, q_vector->rx_ring->queue_index); else if (q_vector->tx_ring) sprintf(q_vector->name, "%s-tx-%u", netdev->name, q_vector->tx_ring->queue_index); else if (q_vector->rx_ring) sprintf(q_vector->name, "%s-rx-%u", netdev->name, q_vector->rx_ring->queue_index); else sprintf(q_vector->name, "%s-unused", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, igb_msix_ring, 0, q_vector->name, q_vector); if (err) goto out; vector++; } igb_configure_msix(adapter); return 0; out: return err; } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) { if (adapter->msix_entries) { pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } else if (adapter->flags & IGB_FLAG_HAS_MSI) { pci_disable_msi(adapter->pdev); } } /** * 
igb_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void igb_free_q_vectors(struct igb_adapter *adapter) { int v_idx; for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; adapter->q_vector[v_idx] = NULL; if (!q_vector) continue; netif_napi_del(&q_vector->napi); kfree(q_vector); } adapter->num_q_vectors = 0; } /** * igb_clear_interrupt_scheme - reset the device to a state of no interrupts * * This function resets the device so that it has 0 rx queues, tx queues, and * MSI-X interrupts allocated. */ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) { igb_free_queues(adapter); igb_free_q_vectors(adapter); igb_reset_interrupt_capability(adapter); } /** * igb_set_interrupt_capability - set MSI or MSI-X if supported * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. **/ static int igb_set_interrupt_capability(struct igb_adapter *adapter) { int err; int numvecs, i; /* Number of supported queues. 
*/ adapter->num_rx_queues = adapter->rss_queues; if (adapter->vfs_allocated_count) adapter->num_tx_queues = 1; else adapter->num_tx_queues = adapter->rss_queues; /* start with one vector for every rx queue */ numvecs = adapter->num_rx_queues; /* if tx handler is separate add 1 for every tx queue */ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) numvecs += adapter->num_tx_queues; /* store the number of vectors reserved for queues */ adapter->num_q_vectors = numvecs; /* add 1 vector for link status interrupts */ numvecs++; adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) goto msi_only; for (i = 0; i < numvecs; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix(adapter->pdev, adapter->msix_entries, numvecs); if (err == 0) goto out; igb_reset_interrupt_capability(adapter); /* If we can't do MSI-X, try MSI */ msi_only: #ifdef CONFIG_PCI_IOV /* disable SR-IOV for non MSI-X configurations */ if (adapter->vf_data) { struct e1000_hw *hw = &adapter->hw; /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); msleep(500); kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); msleep(100); dev_info(&adapter->pdev->dev, "IOV Disabled\n"); } #endif adapter->vfs_allocated_count = 0; adapter->rss_queues = 1; adapter->flags |= IGB_FLAG_QUEUE_PAIRS; adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_q_vectors = 1; if (!pci_enable_msi(adapter->pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; out: /* Notify the stack of the (possibly) reduced queue counts. */ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); return netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); } /** * igb_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. 
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		/* 64 is the NAPI poll weight passed to netif_napi_add() */
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	/* unwinds all vectors allocated so far, including NAPI contexts */
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

/* Attach one rx ring to a q_vector; an itr_val of 1..3 is an ITR mode
 * selector rather than a real interval, so replace it with IGB_START_ITR. */
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/* Attach one tx ring to a q_vector; same itr_val treatment as rx above. */
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		/* enough vectors: each rx and each tx ring gets its own */
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		/* queue pairs: rx ring i and tx ring i share vector i */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
	/* unwind in reverse order of setup */
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI: tear the MSI-X scheme down and rebuild
		 * queues/vectors for the single-vector case */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	/* legacy INTx must be requested shared */
	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

/* Release the IRQs taken by igb_request_irq: in MSI-X mode vector 0 is
 * the "other"/link interrupt (dev_id = adapter) and the remaining vectors
 * belong to the q_vectors; otherwise a single MSI/INTx line was used. */
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	/* wait for any in-flight handlers to finish before returning */
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			/* also unmask the VF mailbox interrupts */
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

/* Keep the management-engine (DHCP cookie) VLAN in the VLAN filter table,
 * removing the previous one when it is no longer in use by the stack. */
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw,
			     old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog.
	 */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

/* Serialize a full down/up restart against concurrent resets via the
 * __IGB_RESETTING bit. */
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");
	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/*
			 * DMA Coalescing high water mark needs to be higher
			 * than the Rx threshold.  The Rx threshold is
			 * currently pba - 6, so we should use a high water
			 * mark of pba - 4.
			 */
			hwm = (pba - 4) << 10;

			reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
			       & E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available..*/
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer= +-1000 usec in 32usec intervals */
			reg |= (1000 >> 5);
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable coalescing(smart fifb)
			 * -UTRESH=0*/
			wr32(E1000_DMCRTRH, 0);

			/* set hwm to PBA - 2 * max frame size */
			wr32(E1000_FCRTC, hwm);

			/*
			 * This sets the time to wait before requesting tran-
			 * sition to low power state to number of usecs needed
			 * to receive 1 512 byte frame at gigabit line rate
			 */
			reg = rd32(E1000_DMCTLX);
			reg |= IGB_DMCTLX_DCFLUSH_DIS;

			/* Delay 255 usec before entering Lx state.
			 */
			reg |= 0xFF;
			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from DMAC */
			wr32(E1000_DMCTXTH,
			     (IGB_MIN_TXPBSIZE -
			      (IGB_TX_BUF_4096 + adapter->max_frame_size))
			     >> 6);

			/* make low power state decision controlled by DMAC */
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* end if IGB_FLAG_DMAC set */
	}
	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability. */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* try 64-bit DMA first, fall back to 32-bit */
	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev,
							   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1,
				 &eeprom_data);
	else if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1,
				 &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	/* do hw tstamp init after resetting */
	igb_init_hw_timer(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
							    "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
							       "unknown"),
		 netdev->dev_addr);

	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);
	switch (hw->mac.type) {
	case e1000_i350:
		igb_set_eee_i350(hw);
		break;
	default:
		break;
	}
	return 0;

	/* error unwind: reverse order of the setup above */
err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
*/ igb_release_hw_control(adapter); unregister_netdev(netdev); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV /* reclaim resources allocated to VFs */ if (adapter->vf_data) { /* disable iov and allow time for transactions to clear */ pci_disable_sriov(pdev); msleep(500); kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); msleep(100); dev_info(&pdev->dev, "IOV Disabled\n"); } #endif iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } /** * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space * @adapter: board private structure to initialize * * This function initializes the vf specific data storage and then attempts to * allocate the VFs. The reason for ordering it this way is because it is much * mor expensive time wise to disable SR-IOV than it is to allocate and free * the memory for the VFs. 
**/ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) { #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; if (adapter->vfs_allocated_count) { adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), GFP_KERNEL); /* if allocation failed then we do not support SR-IOV */ if (!adapter->vf_data) { adapter->vfs_allocated_count = 0; dev_err(&pdev->dev, "Unable to allocate memory for VF " "Data Storage\n"); } } if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) { kfree(adapter->vf_data); adapter->vf_data = NULL; #endif /* CONFIG_PCI_IOV */ adapter->vfs_allocated_count = 0; #ifdef CONFIG_PCI_IOV } else { unsigned char mac_addr[ETH_ALEN]; int i; dev_info(&pdev->dev, "%d vfs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) { random_ether_addr(mac_addr); igb_set_vf_mac(adapter, i, mac_addr); } /* DMA Coalescing is not supported in IOV mode. */ if (adapter->flags & IGB_FLAG_DMAC) adapter->flags &= ~IGB_FLAG_DMAC; } #endif /* CONFIG_PCI_IOV */ } /** * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp * @adapter: board private structure to initialize * * igb_init_hw_timer initializes the function pointer and values for the hw * timer found in hardware. **/ static void igb_init_hw_timer(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; switch (hw->mac.type) { case e1000_i350: case e1000_82580: memset(&adapter->cycles, 0, sizeof(adapter->cycles)); adapter->cycles.read = igb_read_clock; adapter->cycles.mask = CLOCKSOURCE_MASK(64); adapter->cycles.mult = 1; /* * The 82580 timesync updates the system timer every 8ns by 8ns * and the value cannot be shifted. Instead we need to shift * the registers to generate a 64bit timer value. As a result * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by * 24 in order to generate a larger value for synchronization. 
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;

		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/**
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits. As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
				 &adapter->cycles,
				 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	/* i350 cannot do RSS and SR-IOV at the same time */
	if (hw->mac.type ==
e1000_i350 && adapter->vfs_allocated_count)
		adapter->rss_queues = 1;

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) &&
	     (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type == e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

	/* unwind in reverse order of acquisition */
err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
**/ static int igb_close(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); igb_down(adapter); igb_free_irq(adapter); igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); return 0; } /** * igb_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int igb_setup_tx_resources(struct igb_ring *tx_ring) { struct device *dev = tx_ring->dev; int size; size = sizeof(struct igb_buffer) * tx_ring->count; tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->buffer_info); dev_err(dev, "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } /** * igb_setup_all_tx_resources - wrapper to allocate Tx resources * (Descriptors) for all queues * @adapter: board private structure * * Return 0 on success, negative on failure **/ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int i, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = igb_setup_tx_resources(adapter->tx_ring[i]); if (err) { dev_err(&pdev->dev, "Allocation for Tx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_tx_resources(adapter->tx_ring[i]); break; } } for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { int r_idx = i % adapter->num_tx_queues; adapter->multi_tx_table[i] = adapter->tx_ring[r_idx]; } return err; } /** * igb_setup_tctl - configure the transmit control registers * @adapter: Board private structure **/ void igb_setup_tctl(struct igb_adapter 
*adapter) { struct e1000_hw *hw = &adapter->hw; u32 tctl; /* disable queue 0 which is enabled by default on 82575 and 82576 */ wr32(E1000_TXDCTL(0), 0); /* Program the Transmit Control Register */ tctl = rd32(E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); igb_config_collision_dist(hw); /* Enable transmits */ tctl |= E1000_TCTL_EN; wr32(E1000_TCTL, tctl); } /** * igb_configure_tx_ring - Configure transmit ring after Reset * @adapter: board private structure * @ring: tx ring to configure * * Configure a transmit ring after a reset. **/ void igb_configure_tx_ring(struct igb_adapter *adapter, struct igb_ring *ring) { struct e1000_hw *hw = &adapter->hw; u32 txdctl; u64 tdba = ring->dma; int reg_idx = ring->reg_idx; /* disable the queue */ txdctl = rd32(E1000_TXDCTL(reg_idx)); wr32(E1000_TXDCTL(reg_idx), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); wrfl(); mdelay(10); wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), tdba & 0x00000000ffffffffULL); wr32(E1000_TDBAH(reg_idx), tdba >> 32); ring->head = hw->hw_addr + E1000_TDH(reg_idx); ring->tail = hw->hw_addr + E1000_TDT(reg_idx); writel(0, ring->head); writel(0, ring->tail); txdctl |= IGB_TX_PTHRESH; txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_WTHRESH << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); } /** * igb_configure_tx - Configure transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. 
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the receive descriptor"
		" ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			/* unwind the rings set up so far */
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
			/* fall through */
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	/* program the 128-entry redirection table, one dword at a time */
	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}

/* Program a per-pool receive long packet maximum length (VMOLR.RLPML);
 * adds VLAN tag room when the target VF has vlans enabled. */
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

/* Configure the per-pool VM offload register (VMOLR) for pool @vfn:
 * vlan stripping, untagged-packet acceptance, RSS and broadcast bits. */
static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	               ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
		         E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
**/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
	                 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
	                  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}

/* Unmap the DMA mapping of one Tx buffer (page or single), free its skb,
 * and reset the buffer_info bookkeeping fields. */
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
					buffer_info->dma,
					buffer_info->length,
					DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					buffer_info->dma,
					buffer_info->length,
					DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
	                  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
			                 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
			               buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
	                 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
* Returns: -ENOMEM on failure * 0 on no addresses written * X on writing X addresses to MTA **/ static int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u8 *mta_list; int i; if (netdev_mc_empty(netdev)) { /* nothing to program, so clear mc list */ igb_update_mc_addr_list(hw, NULL, 0); igb_restore_vf_multicasts(adapter); return 0; } mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); if (!mta_list) return -ENOMEM; /* The shared function expects a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); igb_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); return netdev_mc_count(netdev); } /** * igb_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure * * Writes unicast address list to the RAR table. * Returns: -ENOMEM on failure/insufficient address space * 0 on no addresses written * X on writing X addresses to the RAR table **/ static int igb_write_uc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); int count = 0; /* return ENOMEM indicating insufficient memory for addresses */ if (netdev_uc_count(netdev) > rar_entries) return -ENOMEM; if (!netdev_uc_empty(netdev) && rar_entries) { struct netdev_hw_addr *ha; netdev_for_each_uc_addr(ha, netdev) { if (!rar_entries) break; igb_rar_set_qsel(adapter, ha->addr, rar_entries--, vfn); count++; } } /* write the addresses in reverse order to avoid write combining */ for (; rar_entries > 0 ; rar_entries--) { wr32(E1000_RAH(rar_entries), 0); wr32(E1000_RAL(rar_entries), 0); } wrfl(); return count; } /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * 
@netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the effected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	/* preserve all VMOLR bits except the three modes managed above */
	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

/* Accumulate any Wrong VM Behavior (spoof) bits into adapter->wvbr;
 * only 82576 and i350 expose the WVBR register here. */
static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (!(wvbr = rd32(E1000_WVBR)))
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

/* Warn about each VF flagged in the accumulated WVBR mask, then clear
 * the reported bits. */
static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for(j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				"Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.
get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}

/* Returns true when the given thermal @event bit is set in THSTAT.
 * Only meaningful on i350 copper in non-SGMII mode; false otherwise. */
static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350, copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
			ret = !!(thstat & event);
		}
	}

	return ret;
}

/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

/* Periodic link/health check run from process context: reports link
 * transitions, updates stats, detects hung tx rings, and rearms itself. */
static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_LINK_THROTTLE)) {
				printk(KERN_INFO "igb: %s The network adapter "
						 "link speed was downshifted "
						 "because it overheated.\n",
						 netdev->name);
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_PWR_DOWN)) {
				printk(KERN_ERR "igb: %s The network adapter "
						"was stopped because it "
						"overheated.\n",
						netdev->name);
			}

			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based on strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *ring;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	ring = q_vector->rx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = ring->total_bytes / packets;
	}

	ring = q_vector->tx_ring;
	if (ring) {
		packets = ACCESS_ONCE(ring->total_packets);

		if (packets)
			avg_wire_size = max_t(u32, avg_wire_size,
					      ring->total_bytes / packets);
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* when in itr mode 3 do not exceed 20K ints/sec */
	if (adapter->rx_itr_setting == 3 && new_val < 196)
		new_val = 196;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		/* deferred write: the ISR programs the new ITR value */
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	/* no packets means no evidence; keep the current latency range */
	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

/* Recompute and stage the ITR value for the single-queue vector based on
 * the rx/tx byte and packet counts since the last interrupt. */
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
				    adapter->rx_itr,
				    q_vector->rx_ring->total_packets,
				    q_vector->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
				    adapter->tx_itr,
				    q_vector->tx_ring->total_packets,
				    q_vector->tx_ring->total_bytes);

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	q_vector->rx_ring->total_bytes = 0;
	q_vector->rx_ring->total_packets = 0;
	q_vector->tx_ring->total_bytes = 0;
	q_vector->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			     max((new_itr * q_vector->itr_val) /
			         (new_itr + (q_vector->itr_val >> 2)),
				 new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16

/* Set up a TSO context descriptor for @skb and advance the ring.
 * Accumulates the total header length into *@hdr_len.  Returns true on
 * success or a negative errno if the cloned header cannot be expanded. */
static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx;
	u8 l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	/* zero the length fields and seed the pseudo-header checksum so
	 * hardware can fill in the real values per segment */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

/* Program a checksum/VLAN offload context descriptor when needed.
 * Returns true when a context descriptor was written, false otherwise. */
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				/* look past the VLAN tag for the real
				 * encapsulated protocol */
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr*)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

/* DMA-map the skb head and all paged fragments into the ring's
 * buffer_info entries.  Returns the number of buffers used (>= 1), or 0
 * on DMA mapping failure after unwinding any partial mappings. */
static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = dma_map_page(dev,
						frag->page,
						frag->page_offset,
						len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;

	}
	/* the last buffer owns the skb and the aggregate accounting */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
	tx_ring->buffer_info[i].gso_segs = gso_segs;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}

/* Write @count advanced data descriptors for an already-mapped packet
 * and notify hardware by bumping the tail pointer. */
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    u32 tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	/* final descriptor carries the end-of-packet command bits */
	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it syncronizes IO on IA64/Altix systems */
	mmiowb();
}

/* Slow path of igb_maybe_stop_tx(): stop the subqueue, then re-check
 * free descriptors under the barrier in case cleanup just made room. */
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

/* Stop the tx queue if fewer than @size descriptors remain free. */
static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}

netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send.
 */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}

static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

/* Work item that dumps state and performs the actual adapter reset. */
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	/* serialize against concurrent resets */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab */

	if (adapter->hw.mac.type == e1000_82580)
		max_frame += IGB_TS_HDR_LEN;

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;

	if ((adapter->hw.mac.type == e1000_82580) &&
	    (rx_buffer_len == IGB_RXBUFFER_128))
		rx_buffer_len += IGB_RXBUFFER_64;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
*/ if (adapter->link_speed == 0) return; if (pci_channel_offline(pdev)) return; bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; struct igb_ring *ring = adapter->rx_ring[i]; ring->rx_stats.drops += rqdpc_tmp; net_stats->rx_fifo_errors += rqdpc_tmp; do { start = u64_stats_fetch_begin_bh(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } net_stats->rx_bytes = bytes; net_stats->rx_packets = packets; bytes = 0; packets = 0; for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { start = u64_stats_fetch_begin_bh(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; /* read stats registers */ adapter->stats.crcerrs += rd32(E1000_CRCERRS); adapter->stats.gprc += rd32(E1000_GPRC); adapter->stats.gorc += rd32(E1000_GORCL); rd32(E1000_GORCH); /* clear GORCL */ adapter->stats.bprc += rd32(E1000_BPRC); adapter->stats.mprc += rd32(E1000_MPRC); adapter->stats.roc += rd32(E1000_ROC); adapter->stats.prc64 += rd32(E1000_PRC64); adapter->stats.prc127 += rd32(E1000_PRC127); adapter->stats.prc255 += rd32(E1000_PRC255); adapter->stats.prc511 += rd32(E1000_PRC511); adapter->stats.prc1023 += rd32(E1000_PRC1023); adapter->stats.prc1522 += rd32(E1000_PRC1522); adapter->stats.symerrs += rd32(E1000_SYMERRS); adapter->stats.sec += rd32(E1000_SEC); mpc = rd32(E1000_MPC); adapter->stats.mpc += mpc; net_stats->rx_fifo_errors += mpc; adapter->stats.scc += rd32(E1000_SCC); adapter->stats.ecol += rd32(E1000_ECOL); adapter->stats.mcc += rd32(E1000_MCC); adapter->stats.latecol += rd32(E1000_LATECOL); adapter->stats.dc += rd32(E1000_DC); adapter->stats.rlec += 
rd32(E1000_RLEC); adapter->stats.xonrxc += rd32(E1000_XONRXC); adapter->stats.xontxc += rd32(E1000_XONTXC); adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); adapter->stats.xofftxc += rd32(E1000_XOFFTXC); adapter->stats.fcruc += rd32(E1000_FCRUC); adapter->stats.gptc += rd32(E1000_GPTC); adapter->stats.gotc += rd32(E1000_GOTCL); rd32(E1000_GOTCH); /* clear GOTCL */ adapter->stats.rnbc += rd32(E1000_RNBC); adapter->stats.ruc += rd32(E1000_RUC); adapter->stats.rfc += rd32(E1000_RFC); adapter->stats.rjc += rd32(E1000_RJC); adapter->stats.tor += rd32(E1000_TORH); adapter->stats.tot += rd32(E1000_TOTH); adapter->stats.tpr += rd32(E1000_TPR); adapter->stats.ptc64 += rd32(E1000_PTC64); adapter->stats.ptc127 += rd32(E1000_PTC127); adapter->stats.ptc255 += rd32(E1000_PTC255); adapter->stats.ptc511 += rd32(E1000_PTC511); adapter->stats.ptc1023 += rd32(E1000_PTC1023); adapter->stats.ptc1522 += rd32(E1000_PTC1522); adapter->stats.mptc += rd32(E1000_MPTC); adapter->stats.bptc += rd32(E1000_BPTC); adapter->stats.tpt += rd32(E1000_TPT); adapter->stats.colc += rd32(E1000_COLC); adapter->stats.algnerrc += rd32(E1000_ALGNERRC); /* read internal phy specific stats */ reg = rd32(E1000_CTRL_EXT); if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { adapter->stats.rxerrc += rd32(E1000_RXERRC); adapter->stats.tncrs += rd32(E1000_TNCRS); } adapter->stats.tsctc += rd32(E1000_TSCTC); adapter->stats.tsctfc += rd32(E1000_TSCTFC); adapter->stats.iac += rd32(E1000_IAC); adapter->stats.icrxoc += rd32(E1000_ICRXOC); adapter->stats.icrxptc += rd32(E1000_ICRXPTC); adapter->stats.icrxatc += rd32(E1000_ICRXATC); adapter->stats.ictxptc += rd32(E1000_ICTXPTC); adapter->stats.ictxatc += rd32(E1000_ICTXATC); adapter->stats.ictxqec += rd32(E1000_ICTXQEC); adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); /* Fill out the OS statistics structure */ net_stats->multicast = adapter->stats.mprc; net_stats->collisions = adapter->stats.colc; /* Rx Errors */ /* RLEC on some 
 * newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

/*
 * igb_msix_other - handler for the "other causes" MSI-X vector
 * @irq: interrupt number
 * @data: pointer to the igb_adapter for this device
 *
 * Services non-queue interrupt causes: device reset assertion, DMA
 * out-of-sync, VF mailbox events and link state changes, then re-arms
 * the relevant IMS/EIMS bits before returning.
 */
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event. */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* re-enable only the causes this vector services; mailbox interrupts
	 * are only needed when VFs are allocated */
	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/*
 * igb_write_itr - program the interrupt throttle rate for a q_vector
 * @q_vector: queue vector whose pending ITR value should be written
 *
 * Writes the cached itr_val to the vector's ITR register if an update is
 * pending; 82575 encodes the value in both halves of the register while
 * newer parts need the "write enable" bit (0x8000000) set instead.
 */
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt.
	 */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
/*
 * igb_update_dca - retarget DCA (direct cache access) to the current CPU
 * @q_vector: queue vector whose Tx/Rx DCA control registers are updated
 *
 * Rewrites the per-queue DCA_TXCTRL/DCA_RXCTRL CPUID fields so descriptor
 * (and for Rx also header/data) writebacks are steered to the cache of the
 * CPU currently running this vector.  82575 uses a different CPUID field
 * layout than later parts.
 */
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();	/* disables preemption until put_cpu() */

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

/*
 * igb_setup_dca - enable DCA for the whole adapter
 * @adapter: board private structure
 *
 * Puts the device in CB2 DCA mode and forces every q_vector to re-program
 * its DCA tag on next use (cpu = -1 invalidates the cached CPU).
 */
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

/*
 * __igb_notify_dca - per-device worker for DCA provider add/remove events
 * @dev: device being iterated by driver_for_each_device()
 * @data: pointer to the unsigned long event code
 */
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

/*
 * igb_notify_dca - DCA subsystem notifier callback
 *
 * Fans the event out to every igb device via __igb_notify_dca().
 */
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ?
NOTIFY_BAD : NOTIFY_DONE; } #endif /* CONFIG_IGB_DCA */ static void igb_ping_all_vfs(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ping; int i; for (i = 0 ; i < adapter->vfs_allocated_count; i++) { ping = E1000_PF_CONTROL_MSG; if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) ping |= E1000_VT_MSGTYPE_CTS; igb_write_mbx(hw, &ping, 1, i); } } static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { struct e1000_hw *hw = &adapter->hw; u32 vmolr = rd32(E1000_VMOLR(vf)); struct vf_data_storage *vf_data = &adapter->vf_data[vf]; vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | IGB_VF_FLAG_MULTI_PROMISC); vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { vmolr |= E1000_VMOLR_MPME; vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; } else { /* * if we have hashes and we are clearing a multicast promisc * flag we need to write the hashes to the MTA as this step * was previously skipped */ if (vf_data->num_vf_mc_hashes > 30) { vmolr |= E1000_VMOLR_MPME; } else if (vf_data->num_vf_mc_hashes) { int j; vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) igb_mta_set(hw, vf_data->vf_mc_hashes[j]); } } wr32(E1000_VMOLR(vf), vmolr); /* there are flags left unprocessed, likely not supported */ if (*msgbuf & E1000_VT_MSGINFO_MASK) return -EINVAL; return 0; } static int igb_set_vf_multicasts(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; int i; /* salt away the number of multicast addresses assigned * to this VF for later use to restore when the PF multi cast * list changes */ vf_data->num_vf_mc_hashes = n; /* only up to 30 hash values supported */ if (n > 30) n = 30; /* store the hashes for later use */ for (i = 0; i < n; i++) 
vf_data->vf_mc_hashes[i] = hash_list[i]; /* Flush and reset the mta with the new values */ igb_set_rx_mode(adapter->netdev); return 0; } static void igb_restore_vf_multicasts(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct vf_data_storage *vf_data; int i, j; for (i = 0; i < adapter->vfs_allocated_count; i++) { u32 vmolr = rd32(E1000_VMOLR(i)); vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); vf_data = &adapter->vf_data[i]; if ((vf_data->num_vf_mc_hashes > 30) || (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { vmolr |= E1000_VMOLR_MPME; } else if (vf_data->num_vf_mc_hashes) { vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) igb_mta_set(hw, vf_data->vf_mc_hashes[j]); } wr32(E1000_VMOLR(i), vmolr); } } static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; u32 pool_mask, reg, vid; int i; pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); /* Find the vlan filter for this id */ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { reg = rd32(E1000_VLVF(i)); /* remove the vf from the pool */ reg &= ~pool_mask; /* if pool is empty then remove entry from vfta */ if (!(reg & E1000_VLVF_POOLSEL_MASK) && (reg & E1000_VLVF_VLANID_ENABLE)) { reg = 0; vid = reg & E1000_VLVF_VLANID_MASK; igb_vfta_set(hw, vid, false); } wr32(E1000_VLVF(i), reg); } adapter->vf_data[vf].vlans_enabled = 0; } static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) { struct e1000_hw *hw = &adapter->hw; u32 reg, i; /* The vlvf table only exists on 82576 hardware and newer */ if (hw->mac.type < e1000_82576) return -1; /* we only need to do this if VMDq is enabled */ if (!adapter->vfs_allocated_count) return -1; /* Find the vlan filter for this id */ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { reg = rd32(E1000_VLVF(i)); if ((reg & E1000_VLVF_VLANID_ENABLE) && vid == (reg & E1000_VLVF_VLANID_MASK)) break; } if (add) { if (i == E1000_VLVF_ARRAY_SIZE) { /* Did not find a matching VLAN ID 
 * entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				/* grow the VF's max Rx packet length by the
				 * 4-byte VLAN tag when its first vlan is added */
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				/* shrink max Rx packet length again once the
				 * VF's last vlan is removed */
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

/*
 * igb_set_vmvir - program the VM VLAN insert register for a VF
 * @adapter: board private structure
 * @vid: VLAN id to insert on transmit, 0 to disable default insertion
 * @vf: VF index
 */
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

/*
 * igb_ndo_set_vf_vlan - ndo callback: administratively set a VF's VLAN/QoS
 * @netdev: PF network device
 * @vf: VF index
 * @vlan: VLAN id (0 clears the administrative VLAN)
 * @qos: 802.1p priority (0-7)
 *
 * Returns 0 on success or a negative errno.
 */
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

/*
 * igb_set_vf_vlan - handle a VF mailbox request to add/remove a VLAN
 * @adapter: board private structure
 * @msgbuf: mailbox message; word 0 holds the add flag, word 1 the vid
 * @vf: VF index
 */
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

/*
 * igb_vf_reset - restore a VF's offload, VLAN and multicast state to defaults
 * @adapter: board private structure
 * @vf: VF index
 */
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		/* NOTE(review): the vfta was already cleared above, so this
		 * second call looks redundant — confirm before removing */
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

/*
 * igb_vf_reset_event - react to a VF function-level reset
 * @adapter: board private structure
 * @vf: VF index
 */
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if
 (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

/*
 * igb_vf_reset_msg - complete a VF-initiated reset handshake
 * @adapter: board private structure
 * @vf: VF index
 *
 * Clears VF state, programs the VF MAC into a dedicated RAR entry, enables
 * Tx/Rx for the VF and replies with an ACK carrying the MAC address.
 */
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	/* RAR entries are handed out from the top of the table, one per VF */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

/*
 * igb_set_vf_mac_addr - handle a VF request to set its own MAC address
 * @adapter: board private structure
 * @msg: mailbox message; the MAC starts at the second 32-bit word
 * @vf: VF index
 *
 * Returns 0 on success, -1 if the address is invalid.
 */
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	/* NOTE(review): cast is (char *) but target is unsigned char * —
	 * harmless on byte access, but the cast type looks unintended */
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

/*
 * igb_rcv_ack_from_vf - respond to an unexpected ACK from a VF
 * @adapter: board private structure
 * @vf: VF index
 *
 * A VF that has not completed its reset (no CTS) gets NACKed at most once
 * every 2 seconds.
 */
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

/*
 * igb_rcv_msg_from_vf - read and dispatch one VF mailbox message
 * @adapter: board private structure
 * @vf: VF index
 *
 * Decodes the message type and routes it to the matching handler, then
 * writes back the same message with ACK or NACK (and CTS on the normal
 * path) so the VF learns the result.
 */
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* CTS is deliberately only set on the normal path; the error paths
	 * jump straight to out and reply without it */
	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

/*
 * igb_msg_task - poll every VF mailbox for resets, messages and acks
 * @adapter: board private structure
 */
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf <
 adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/*
 * igb_ring_irq_enable - re-enable interrupts for a q_vector after polling
 * @q_vector: queue vector that finished its NAPI poll
 *
 * Updates the adaptive ITR first (per-vector in MSI-X mode, global
 * otherwise), then unmasks this vector's interrupt unless the adapter is
 * going down.
 */
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector, napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	/* consume the whole budget if Tx isn't done so NAPI polls us again */
	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer: pointer to igb_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
                            struct igb_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	/* walk completed packets (DD set on their end-of-packet descriptor),
	 * bounded by the ring size so a stuck ring can't loop forever */
	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (buffer_info->skb) {
				total_bytes += buffer_info->bytecount;
				/* gso_segs is currently only valid for tcp */
				total_packets += buffer_info->gso_segs;
				igb_tx_hwtstamp(q_vector, buffer_info);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				" Tx Queue <%d>\n"
				" TDH <%x>\n"
				" TDT <%x>\n"
				" next_to_use <%x>\n"
				" next_to_clean <%x>\n"
				"buffer_info[next_to_clean]\n"
				" time_stamp <%lx>\n"
				" next_to_watch <%x>\n"
				" jiffies <%lx>\n"
				" desc.status <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	return count < tx_ring->count;
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter
*adapter = q_vector->adapter; if (vlan_tag && adapter->vlgrp) vlan_gro_receive(&q_vector->napi, adapter->vlgrp, vlan_tag, skb); else napi_gro_receive(&q_vector->napi, skb); } static inline void igb_rx_checksum_adv(struct igb_ring *ring, u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Ignore Checksum bit is set or checksum is disabled through ethtool */ if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || (status_err & E1000_RXD_STAT_IXSM)) return; /* TCP/UDP checksum error bit is set */ if (status_err & (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { /* * work around errata with sctp packets where the TCPE aka * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) * packets, (aka let the stack check the crc32c) */ if ((skb->len == 60) && (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) { u64_stats_update_begin(&ring->rx_syncp); ring->rx_stats.csum_err++; u64_stats_update_end(&ring->rx_syncp); } /* let the stack verify checksum errors */ return; } /* It must be a TCP or UDP packet with a valid checksum */ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) skb->ip_summed = CHECKSUM_UNNECESSARY; dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err); } static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u64 regval; /* * If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so * read the registers to make them available again. Because only one * packet can be time stamped at a time, we know that the register * values must belong to this one here and therefore we don't need to * compare any of the additional attributes stored for it. * * If nothing went wrong, then it should have a shared tx_flags that we * can turn into a skb_shared_hwtstamps. 
	 */
	if (staterr & E1000_RXDADV_STAT_TSIP) {
		/* timestamp was prepended to the packet data by hardware */
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

/*
 * igb_get_hlen - extract the hardware-parsed header length for a packet
 * @rx_ring: Rx ring (bounds the result by rx_buffer_len)
 * @rx_desc: writeback descriptor carrying the hdr_info field
 */
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

/*
 * igb_clean_rx_irq_adv - process received packets from an Rx ring
 * @q_vector: queue vector owning the ring
 * @work_done: in/out count of packets processed this poll
 * @budget: NAPI budget limit
 *
 * Walks writeback descriptors with DD set, reassembles header-split and
 * multi-descriptor packets, hands completed skbs to the stack and refills
 * descriptors in batches.  Returns true if any descriptor was cleaned.
 */
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			dma_unmap_single(dev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				/* large buffers: whole packet in the skb */
				skb_put(skb, length);
				goto send_up;
			}
			/* header split: header in skb, payload in the page */
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			dma_unmap_page(dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			/* recycle the half-page only if we hold the sole
			 * reference and it is local to this NUMA node */
			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* multi-descriptor packet: chain state forward and
			 * keep accumulating into the same skb */
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
			igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (unlikely(!buffer_info->page)) {
					u64_stats_update_begin(&rx_ring->rx_syncp);
					rx_ring->rx_stats.alloc_failed++;
					u64_stats_update_end(&rx_ring->rx_syncp);
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				/* flip to the other half of the page */
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(rx_ring->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } skb = buffer_info->skb; if (!skb) { skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (unlikely(!skb)) { u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } buffer_info->skb = skb; } if (!buffer_info->dma) { buffer_info->dma = dma_map_single(rx_ring->dev, skb->data, bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, buffer_info->dma)) { buffer_info->dma = 0; u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. */ if (bufsz < IGB_RXBUFFER_1024) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } i++; if (i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; if (i == 0) i = (rx_ring->count - 1); else i--; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); writel(i, rx_ring->tail); } } /** * igb_mii_ioctl - * @netdev: * @ifreq: * @cmd: **/ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); if (adapter->hw.phy.media_type != e1000_media_type_copper) return -EOPNOTSUPP; switch (cmd) { case SIOCGMIIPHY: data->phy_id = adapter->hw.phy.addr; break; case SIOCGMIIREG: if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, &data->val_out)) return -EIO; break; case SIOCSMIIREG: default: return -EOPNOTSUPP; } return 0; } /** * igb_hwtstamp_ioctl - control hardware time stamping * @netdev: * @ifreq: * @cmd: * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't case any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware * filters. Not all combinations are supported, in particular event * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". 
* **/ static int igb_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct hwtstamp_config config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 tsync_rx_cfg = 0; bool is_l4 = false; bool is_l2 = false; u32 regval; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; /* reserved for future extensions */ if (config.flags) return -EINVAL; switch (config.tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; case HWTSTAMP_TX_ON: break; default: return -ERANGE; } switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_ALL: /* * register TSYNCRXCFG must be set, therefore it is not * possible to time stamp both Sync and Delay_Req messages * => fall back to time stamping all packets */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; config.rx_filter = HWTSTAMP_FILTER_ALL; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; is_l2 = true; is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; is_l2 = true; is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case 
HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; break; default: return -ERANGE; } if (hw->mac.type == e1000_82575) { if (tsync_rx_ctl | tsync_tx_ctl) return -EINVAL; return 0; } /* * Per-packet timestamping only works if all packets are * timestamped, so enable timestamping in all packets as * long as one rx filter was configured. */ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; } /* enable/disable TX */ regval = rd32(E1000_TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; regval |= tsync_tx_ctl; wr32(E1000_TSYNCTXCTL, regval); /* enable/disable RX */ regval = rd32(E1000_TSYNCRXCTL); regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); regval |= tsync_rx_ctl; wr32(E1000_TSYNCRXCTL, regval); /* define which PTP packets are time stamped */ wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); /* define ethertype filter for timestamped packets */ if (is_l2) wr32(E1000_ETQF(3), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else wr32(E1000_ETQF(3), 0); #define PTP_PORT 319 /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { u32 ftqf = (IPPROTO_UDP /* UDP */ | E1000_FTQF_VF_BP /* VF not compared */ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ | E1000_FTQF_MASK); /* mask all inputs */ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ wr32(E1000_IMIR(3), htons(PTP_PORT)); wr32(E1000_IMIREXT(3), (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); if (hw->mac.type == e1000_82576) { /* enable source port check */ wr32(E1000_SPQF(3), htons(PTP_PORT)); ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; } wr32(E1000_FTQF(3), ftqf); } else { wr32(E1000_FTQF(3), E1000_FTQF_MASK); } wrfl(); adapter->hwtstamp_config = config; /* clear TX/RX 
time stamp registers, just to be sure */ regval = rd32(E1000_TXSTMPH); regval = rd32(E1000_RXSTMPH); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } /** * igb_ioctl - * @netdev: * @ifreq: * @cmd: **/ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); case SIOCSHWTSTAMP: return igb_hwtstamp_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_read_config_word(adapter->pdev, cap_offset + reg, value); return 0; } s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; u16 cap_offset; cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); if (!cap_offset) return -E1000_ERR_CONFIG; pci_write_config_word(adapter->pdev, cap_offset + reg, *value); return 0; } static void igb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl; igb_irq_disable(adapter); adapter->vlgrp = grp; if (grp) { /* enable VLAN tag insert/strip */ ctrl = rd32(E1000_CTRL); ctrl |= E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); /* Disable CFI check */ rctl = rd32(E1000_RCTL); rctl &= ~E1000_RCTL_CFIEN; wr32(E1000_RCTL, rctl); } else { /* disable VLAN tag insert/strip */ ctrl = rd32(E1000_CTRL); ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); } igb_rlpml_set(adapter); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); } static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int pf_id = 
adapter->vfs_allocated_count; /* attempt to add filter to vlvf array */ igb_vlvf_set(adapter, vid, true, pf_id); /* add the filter since PF can receive vlans w/o entry in vlvf */ igb_vfta_set(hw, vid, true); } static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; s32 err; igb_irq_disable(adapter); vlan_group_set_device(adapter->vlgrp, vid, NULL); if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); /* remove vlan from VLVF table array */ err = igb_vlvf_set(adapter, vid, false, pf_id); /* if vid was not present in VLVF just remove it from table */ if (err) igb_vfta_set(hw, vid, false); } static void igb_restore_vlan(struct igb_adapter *adapter) { igb_vlan_rx_register(adapter->netdev, adapter->vlgrp); if (adapter->vlgrp) { u16 vid; for (vid = 0; vid < VLAN_N_VID; vid++) { if (!vlan_group_get_device(adapter->vlgrp, vid)) continue; igb_vlan_rx_add_vid(adapter->netdev, vid); } } } int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) { struct pci_dev *pdev = adapter->pdev; struct e1000_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; /* Make sure dplx is at most 1 bit and lsb of speed is not set * for the switch() below to work */ if ((spd & 1) || (dplx & ~1)) goto err_inval; /* Fiber NIC's only allow 1000 Gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && spd != SPEED_1000 && dplx != DUPLEX_FULL) goto err_inval; switch (spd + dplx) { case SPEED_10 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_10_HALF; break; case SPEED_10 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_10_FULL; break; case SPEED_100 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_100_HALF; break; case SPEED_100 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = 
ADVERTISE_1000_FULL; break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: goto err_inval; } return 0; err_inval: dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); if (netif_running(netdev)) igb_close(netdev); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; if (wufc) { igb_setup_rctl(adapter); igb_set_rx_mode(netdev); /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { rctl = rd32(E1000_RCTL); rctl |= E1000_RCTL_MPE; wr32(E1000_RCTL, rctl); } ctrl = rd32(E1000_CTRL); /* advertise wake from D3Cold */ #define E1000_CTRL_ADVD3WUC 0x00100000 /* phy power management enable */ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); /* Allow time for pending master requests to run */ igb_disable_pcie_master(hw); wr32(E1000_WUC, E1000_WUC_PME_EN); wr32(E1000_WUFC, wufc); } else { wr32(E1000_WUC, 0); wr32(E1000_WUFC, 0); } *enable_wake = wufc || adapter->en_mng_pt; if (!*enable_wake) igb_power_down_link(adapter); else igb_power_up_link(adapter); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ igb_release_hw_control(adapter); pci_disable_device(pdev); return 0; } #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *pdev, pm_message_t state) { int retval; bool wake; retval = __igb_shutdown(pdev, &wake); if (retval) return retval; if (wake) { pci_prepare_to_sleep(pdev); } else { pci_wake_from_d3(pdev, false); pci_set_power_state(pdev, PCI_D3hot); } return 0; } static int igb_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "igb: Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } igb_reset(adapter); /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); wr32(E1000_WUS, ~0); if (netif_running(netdev)) { err = igb_open(netdev); if (err) return err; } netif_device_attach(netdev); return 0; } #endif static void igb_shutdown(struct pci_dev *pdev) { bool wake; __igb_shutdown(pdev, &wake); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); pci_set_power_state(pdev, PCI_D3hot); } } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. 
*/ static void igb_netpoll(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int i; if (!adapter->msix_entries) { struct igb_q_vector *q_vector = adapter->q_vector[0]; igb_irq_disable(adapter); napi_schedule(&q_vector->napi); return; } for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; wr32(E1000_EIMC, q_vector->eims_value); napi_schedule(&q_vector->napi); } } #endif /* CONFIG_NET_POLL_CONTROLLER */ /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) igb_down(adapter); pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * igb_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the igb_resume routine. 
*/ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pci_ers_result_t result; int err; if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); igb_reset(adapter); wr32(E1000_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err) { dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " "failed 0x%0x\n", err); /* non-fatal, continue */ } return result; } /** * igb_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the igb_resume routine. */ static void igb_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); return; } } netif_device_attach(netdev); /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); } static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, u8 qsel) { u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* Indicate to hardware the Address is Valid. 
*/ rar_high |= E1000_RAH_AV; if (hw->mac.type == e1000_82575) rar_high |= E1000_RAH_POOL_1 * qsel; else rar_high |= E1000_RAH_POOL_1 << qsel; wr32(E1000_RAL(index), rar_low); wrfl(); wr32(E1000_RAH(index), rar_high); wrfl(); } static int igb_set_vf_mac(struct igb_adapter *adapter, int vf, unsigned char *mac_addr) { struct e1000_hw *hw = &adapter->hw; /* VF MAC addresses start at end of receive addresses and moves * torwards the first, as a result a collision should not be possible */ int rar_entry = hw->mac.rar_entry_count - (vf + 1); memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); return 0; } static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) return -EINVAL; adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" " change effective."); if (test_bit(__IGB_DOWN, &adapter->state)) { dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," " but the PF device is not up.\n"); dev_warn(&adapter->pdev->dev, "Bring the PF device up before" " attempting to use the VF device.\n"); } return igb_set_vf_mac(adapter, vf, mac); } static int igb_link_mbps(int internal_link_speed) { switch (internal_link_speed) { case SPEED_100: return 100; case SPEED_1000: return 1000; default: return 0; } } static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, int link_speed) { int rf_dec, rf_int; u32 bcnrc_val; if (tx_rate != 0) { /* Calculate the rate factor values to set */ rf_int = link_speed / tx_rate; rf_dec = (link_speed - (rf_int * tx_rate)); rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & 
E1000_RTTBCNRC_RF_INT_MASK); bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); } else { bcnrc_val = 0; } wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ wr32(E1000_RTTBCNRC, bcnrc_val); } static void igb_check_vf_rate_limit(struct igb_adapter *adapter) { int actual_link_speed, i; bool reset_rate = false; /* VF TX rate limit was not set or not supported */ if ((adapter->vf_rate_link_speed == 0) || (adapter->hw.mac.type != e1000_82576)) return; actual_link_speed = igb_link_mbps(adapter->link_speed); if (actual_link_speed != adapter->vf_rate_link_speed) { reset_rate = true; adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, "Link speed has been changed. VF Transmit " "rate is disabled\n"); } for (i = 0; i < adapter->vfs_allocated_count; i++) { if (reset_rate) adapter->vf_data[i].tx_rate = 0; igb_set_vf_rate_limit(&adapter->hw, i, adapter->vf_data[i].tx_rate, actual_link_speed); } } static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int actual_link_speed; if (hw->mac.type != e1000_82576) return -EOPNOTSUPP; actual_link_speed = igb_link_mbps(adapter->link_speed); if ((vf >= adapter->vfs_allocated_count) || (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || (tx_rate < 0) || (tx_rate > actual_link_speed)) return -EINVAL; adapter->vf_rate_link_speed = actual_link_speed; adapter->vf_data[vf].tx_rate = (u16)tx_rate; igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); return 0; } static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct igb_adapter *adapter = netdev_priv(netdev); if (vf >= adapter->vfs_allocated_count) return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); ivi->tx_rate = adapter->vf_data[vf].tx_rate; ivi->vlan = adapter->vf_data[vf].pf_vlan; ivi->qos = adapter->vf_data[vf].pf_qos; return 0; } static void igb_vmm_control(struct igb_adapter 
*adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; switch (hw->mac.type) { case e1000_82575: default: /* replication is not supported for 82575 */ return; case e1000_82576: /* notify HW that the MAC is adding vlan tags */ reg = rd32(E1000_DTXCTL); reg |= E1000_DTXCTL_VLAN_ADDED; wr32(E1000_DTXCTL, reg); case e1000_82580: /* enable replication vlan tag stripping */ reg = rd32(E1000_RPLOLR); reg |= E1000_RPLOLR_STRVLAN; wr32(E1000_RPLOLR, reg); case e1000_i350: /* none of the above registers are supported by i350 */ break; } if (adapter->vfs_allocated_count) { igb_vmdq_set_loopback_pf(hw, true); igb_vmdq_set_replication_pf(hw, true); igb_vmdq_set_anti_spoofing_pf(hw, true, adapter->vfs_allocated_count); } else { igb_vmdq_set_loopback_pf(hw, false); igb_vmdq_set_replication_pf(hw, false); } } /* igb_main.c */
gpl-2.0
voodik/android_kernel_hardkernel_odroidxu3
drivers/gpu/drm/nouveau/nouveau_abi16.c
2074
13123
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #include <core/object.h> #include <core/client.h> #include <core/device.h> #include <core/class.h> #include <core/mm.h> #include <subdev/fb.h> #include <subdev/timer.h> #include <subdev/instmem.h> #include <engine/graph.h> #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_gem.h" #include "nouveau_chan.h" #include "nouveau_abi16.h" struct nouveau_abi16 * nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) { struct nouveau_cli *cli = nouveau_cli(file_priv); mutex_lock(&cli->mutex); if (!cli->abi16) { struct nouveau_abi16 *abi16; cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); if (cli->abi16) { INIT_LIST_HEAD(&abi16->channels); abi16->client = nv_object(cli); /* allocate device object targeting client's default * device (ie. 
the one that belongs to the fd it * opened) */ if (nouveau_object_new(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE, 0x0080, &(struct nv_device_class) { .device = ~0ULL, }, sizeof(struct nv_device_class), &abi16->device) == 0) return cli->abi16; kfree(cli->abi16); cli->abi16 = NULL; } mutex_unlock(&cli->mutex); } return cli->abi16; } int nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) { struct nouveau_cli *cli = (void *)abi16->client; mutex_unlock(&cli->mutex); return ret; } u16 nouveau_abi16_swclass(struct nouveau_drm *drm) { switch (nv_device(drm->device)->card_type) { case NV_04: return 0x006e; case NV_10: case NV_20: case NV_30: case NV_40: return 0x016e; case NV_50: return 0x506e; case NV_C0: case NV_D0: case NV_E0: return 0x906e; } return 0x0000; } static void nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan, struct nouveau_abi16_ntfy *ntfy) { nouveau_mm_free(&chan->heap, &ntfy->node); list_del(&ntfy->head); kfree(ntfy); } static void nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, struct nouveau_abi16_chan *chan) { struct nouveau_abi16_ntfy *ntfy, *temp; /* wait for all activity to stop before releasing notify object, which * may be still in use */ if (chan->chan && chan->ntfy) nouveau_channel_idle(chan->chan); /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); } if (chan->ntfy) { nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); drm_gem_object_unreference_unlocked(chan->ntfy->gem); } if (chan->heap.block_size) nouveau_mm_fini(&chan->heap); /* destroy channel object, all children will be killed too */ if (chan->chan) { abi16->handles &= ~(1 << (chan->chan->handle & 0xffff)); nouveau_channel_del(&chan->chan); } list_del(&chan->head); kfree(chan); } void nouveau_abi16_fini(struct nouveau_abi16 *abi16) { struct nouveau_cli *cli = (void *)abi16->client; struct nouveau_abi16_chan *chan, *temp; /* cleanup channels */ list_for_each_entry_safe(chan, temp, &abi16->channels, 
head) { nouveau_abi16_chan_fini(abi16, chan); } /* destroy the device object */ nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE); kfree(cli->abi16); cli->abi16 = NULL; } int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_timer *ptimer = nouveau_timer(device); struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR); struct drm_nouveau_getparam *getparam = data; switch (getparam->param) { case NOUVEAU_GETPARAM_CHIPSET_ID: getparam->value = device->chipset; break; case NOUVEAU_GETPARAM_PCI_VENDOR: getparam->value = dev->pci_vendor; break; case NOUVEAU_GETPARAM_PCI_DEVICE: getparam->value = dev->pci_device; break; case NOUVEAU_GETPARAM_BUS_TYPE: if (drm_pci_device_is_agp(dev)) getparam->value = 0; else if (!pci_is_pcie(dev->pdev)) getparam->value = 1; else getparam->value = 2; break; case NOUVEAU_GETPARAM_FB_SIZE: getparam->value = drm->gem.vram_available; break; case NOUVEAU_GETPARAM_AGP_SIZE: getparam->value = drm->gem.gart_available; break; case NOUVEAU_GETPARAM_VM_VRAM_BASE: getparam->value = 0; /* deprecated */ break; case NOUVEAU_GETPARAM_PTIMER_TIME: getparam->value = ptimer->read(ptimer); break; case NOUVEAU_GETPARAM_HAS_BO_USAGE: getparam->value = 1; break; case NOUVEAU_GETPARAM_HAS_PAGEFLIP: getparam->value = 1; break; case NOUVEAU_GETPARAM_GRAPH_UNITS: getparam->value = graph->units ? 
graph->units(graph) : 0; break; default: nv_debug(device, "unknown parameter %lld\n", getparam->param); return -EINVAL; } return 0; } int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS) { return -EINVAL; } int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) { struct drm_nouveau_channel_alloc *init = data; struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan; struct nouveau_client *client; struct nouveau_device *device; struct nouveau_instmem *imem; struct nouveau_fb *pfb; int ret; if (unlikely(!abi16)) return -ENOMEM; if (!drm->channel) return nouveau_abi16_put(abi16, -ENODEV); client = nv_client(abi16->client); device = nv_device(abi16->device); imem = nouveau_instmem(device); pfb = nouveau_fb(device); /* hack to allow channel engine type specification on kepler */ if (device->card_type >= NV_E0) { if (init->fb_ctxdma_handle != ~0) init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; else init->fb_ctxdma_handle = init->tt_ctxdma_handle; /* allow flips to be executed if this is a graphics channel */ init->tt_ctxdma_handle = 0; if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) init->tt_ctxdma_handle = 1; } if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return nouveau_abi16_put(abi16, -EINVAL); /* allocate "abi16 channel" data and make up a handle for it */ init->channel = ffsll(~abi16->handles); if (!init->channel--) return nouveau_abi16_put(abi16, -ENOSPC); chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return nouveau_abi16_put(abi16, -ENOMEM); INIT_LIST_HEAD(&chan->notifiers); list_add(&chan->head, &abi16->channels); abi16->handles |= (1 << init->channel); /* create channel object and initialise dma and fence management */ ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | init->channel, init->fb_ctxdma_handle, init->tt_ctxdma_handle, &chan->chan); if (ret) goto done; if 
(device->card_type >= NV_50) init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART; else if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; else init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; if (device->card_type < NV_C0) { init->subchan[0].handle = 0x00000000; init->subchan[0].grclass = 0x0000; init->subchan[1].handle = NvSw; init->subchan[1].grclass = 0x506e; init->nr_subchan = 2; } /* Named memory object area */ ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 0, 0, &chan->ntfy); if (ret == 0) ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT); if (ret) goto done; if (device->card_type >= NV_50) { ret = nouveau_bo_vma_add(chan->ntfy, client->vm, &chan->ntfy_vma); if (ret) goto done; } ret = drm_gem_handle_create(file_priv, chan->ntfy->gem, &init->notifier_handle); if (ret) goto done; ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1); done: if (ret) nouveau_abi16_chan_fini(abi16, chan); return nouveau_abi16_put(abi16, ret); } int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_channel_free *req = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan; int ret = -ENOENT; if (unlikely(!abi16)) return -ENOMEM; list_for_each_entry(chan, &abi16->channels, head) { if (chan->chan->handle == (NVDRM_CHAN | req->channel)) { nouveau_abi16_chan_fini(abi16, chan); return nouveau_abi16_put(abi16, 0); } } return nouveau_abi16_put(abi16, ret); } int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) { struct drm_nouveau_grobj_alloc *init = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_object *object; int ret; if (unlikely(!abi16)) return -ENOMEM; if (init->handle == ~0) return nouveau_abi16_put(abi16, -EINVAL); /* compatibility with userspace that assumes 506e for all chipsets */ if (init->class == 0x506e) { init->class = 
nouveau_abi16_swclass(drm); if (init->class == 0x906e) return nouveau_abi16_put(abi16, 0); } ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel, init->handle, init->class, NULL, 0, &object); return nouveau_abi16_put(abi16, ret); } int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) { struct drm_nouveau_notifierobj_alloc *info = data; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; struct nv_dma_class args = {}; int ret; if (unlikely(!abi16)) return -ENOMEM; /* completely unnecessary for these chipsets... */ if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) return nouveau_abi16_put(abi16, -EINVAL); list_for_each_entry(temp, &abi16->channels, head) { if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { chan = temp; break; } } if (!chan) return nouveau_abi16_put(abi16, -ENOENT); ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL); if (!ntfy) return nouveau_abi16_put(abi16, -ENOMEM); list_add(&ntfy->head, &chan->notifiers); ntfy->handle = info->handle; ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1, &ntfy->node); if (ret) goto done; args.start = ntfy->node->offset; args.limit = ntfy->node->offset + ntfy->node->length - 1; if (device->card_type >= NV_50) { args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; args.start += chan->ntfy_vma.offset; args.limit += chan->ntfy_vma.offset; } else if (drm->agp.stat == ENABLED) { args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; args.start += drm->agp.base + chan->ntfy->bo.offset; args.limit += drm->agp.base + chan->ntfy->bo.offset; } else { args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; args.start += chan->ntfy->bo.offset; args.limit += chan->ntfy->bo.offset; } ret = nouveau_object_new(abi16->client, chan->chan->handle, ntfy->handle, 0x003d, &args, 
sizeof(args), &object); if (ret) goto done; done: if (ret) nouveau_abi16_ntfy_fini(chan, ntfy); return nouveau_abi16_put(abi16, ret); } int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_gpuobj_free *fini = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan = NULL, *temp; struct nouveau_abi16_ntfy *ntfy; int ret; if (unlikely(!abi16)) return -ENOMEM; list_for_each_entry(temp, &abi16->channels, head) { if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { chan = temp; break; } } if (!chan) return nouveau_abi16_put(abi16, -ENOENT); /* synchronize with the user channel and destroy the gpu object */ nouveau_channel_idle(chan->chan); ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle); if (ret) return nouveau_abi16_put(abi16, ret); /* cleanup extra state if this object was a notifier */ list_for_each_entry(ntfy, &chan->notifiers, head) { if (ntfy->handle == fini->handle) { nouveau_mm_free(&chan->heap, &ntfy->node); list_del(&ntfy->head); break; } } return nouveau_abi16_put(abi16, 0); }
gpl-2.0
pritanshchandra/purex_kernel_xolo_black
fs/qnx6/dir.c
2074
7262
/*
 * QNX6 file system, Linux implementation.
 *
 * Version : 1.0.0
 *
 * History :
 *
 * 01-02-2012 by Kai Bankett (chaosman@ontika.net) : first release.
 * 16-02-2012 pagemap extension by Al Viro
 *
 */

#include "qnx6.h"

/* Compute the on-disk checksum of a long filename (shift/xor CRC over
 * the raw name bytes); compared against de->de_checksum on readdir.
 */
static unsigned qnx6_lfile_checksum(char *name, unsigned size)
{
	unsigned crc = 0;
	char *end = name + size;
	while (name < end) {
		crc = ((crc >> 1) + *(name++)) ^
			((crc & 0x00000001) ? 0x80000000 : 0);
	}
	return crc;
}

/* Read directory page n through the page cache and kmap it.
 * On success the caller owns a kmapped page and must release it with
 * qnx6_put_page(); on failure an ERR_PTR page is returned unmapped.
 */
static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page))
		kmap(page);
	return page;
}

/* Number of page-cache pages spanned by the directory contents. */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

/* Number of directory entries stored on page page_nr; the last page of
 * the directory may be only partially filled.
 */
static unsigned last_entry(struct inode *inode, unsigned long page_nr)
{
	unsigned long last_byte = inode->i_size;
	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte / QNX6_DIR_ENTRY_SIZE;
}

/* Resolve a long-filename directory entry to the qnx6_long_filename
 * record stored in the superblock's "longfile" inode.  On success *p is
 * set to the kmapped page holding the record (caller releases it with
 * qnx6_put_page()); on failure an ERR_PTR is returned and *p is not set.
 */
static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
					 struct qnx6_long_dir_entry *de,
					 struct page **p)
{
	struct qnx6_sb_info *sbi = QNX6_SB(sb);
	u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
	u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */
	/* within page */
	u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK;
	struct address_space *mapping = sbi->longfile->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (IS_ERR(page))
		return ERR_CAST(page);
	kmap(*p = page);
	return (struct qnx6_long_filename *)(page_address(page) + offs);
}

/* Emit one long-filename entry through filldir.
 * Returns 1 on success, 0 when readdir should stop (bad entry, read
 * error, oversized name, or filldir signalling a full buffer).
 */
static int qnx6_dir_longfilename(struct inode *inode,
			struct qnx6_long_dir_entry *de,
			void *dirent, loff_t pos,
			unsigned de_inode, filldir_t filldir)
{
	struct qnx6_long_filename *lf;
	struct super_block *s = inode->i_sb;
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	struct page *page;
	int lf_size;

	if (de->de_size != 0xff) {
		/* error - long filename entries always have size 0xff
		   in direntry */
		printk(KERN_ERR "qnx6: invalid direntry size (%i).\n",
				de->de_size);
		return 0;
	}
	lf = qnx6_longname(s, de, &page);
	if (IS_ERR(lf)) {
		printk(KERN_ERR "qnx6:Error reading longname\n");
		return 0;
	}

	lf_size = fs16_to_cpu(sbi, lf->lf_size);

	if (lf_size > QNX6_LONG_NAME_MAX) {
		QNX6DEBUG((KERN_INFO "file %s\n", lf->lf_fname));
		printk(KERN_ERR "qnx6:Filename too long (%i)\n", lf_size);
		qnx6_put_page(page);
		return 0;
	}

	/* calc & validate longfilename checksum
	   mmi 3g filesystem does not have that checksum */
	/* checksum mismatch is logged but not treated as fatal */
	if (!test_opt(s, MMI_FS) && fs32_to_cpu(sbi, de->de_checksum) !=
			qnx6_lfile_checksum(lf->lf_fname, lf_size))
		printk(KERN_INFO "qnx6: long filename checksum error.\n");

	QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n",
					lf_size, lf->lf_fname, de_inode));
	if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode,
			DT_UNKNOWN) < 0) {
		qnx6_put_page(page);
		return 0;
	}

	qnx6_put_page(page);
	/* success */
	return 1;
}

/* readdir: walk the directory pages from filp->f_pos, emitting short
 * entries directly and long entries via qnx6_dir_longfilename(), until
 * the end of the directory or filldir's buffer is full.
 */
static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);
	struct super_block *s = inode->i_sb;
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	/* round f_pos down to an entry boundary */
	loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
	unsigned long npages = dir_pages(inode);
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
	bool done = false;

	if (filp->f_pos >= inode->i_size)
		return 0;

	for ( ; !done && n < npages; n++, start = 0) {
		struct page *page = qnx6_get_page(inode, n);
		int limit = last_entry(inode, n);
		struct qnx6_dir_entry *de;
		int i = start;

		if (IS_ERR(page)) {
			printk(KERN_ERR "qnx6_readdir: read failed\n");
			/* skip past the unreadable page on the next call */
			filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT;
			return PTR_ERR(page);
		}
		de = ((struct qnx6_dir_entry *)page_address(page)) + start;
		for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) {
			int size = de->de_size;
			u32 no_inode = fs32_to_cpu(sbi, de->de_inode);

			/* empty/deleted slot */
			if (!no_inode || !size)
				continue;

			if (size > QNX6_SHORT_NAME_MAX) {
				/* long filename detected
				   get the filename from long filename
				   structure / block */
				if (!qnx6_dir_longfilename(inode,
					(struct qnx6_long_dir_entry *)de,
					dirent, pos, no_inode,
					filldir)) {
					done = true;
					break;
				}
			} else {
				QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s"
				   " inode:%u\n", size, de->de_fname,
							no_inode));
				if (filldir(dirent, de->de_fname, size,
				      pos, no_inode, DT_UNKNOWN)
					< 0) {
					done = true;
					break;
				}
			}
		}
		qnx6_put_page(page);
	}
	filp->f_pos = pos;
	return 0;
}

/*
 * check if the long filename is correct.
 * Returns the entry's inode number on a full-length match, 0 otherwise.
 */
static unsigned qnx6_long_match(int len, const char *name,
			struct qnx6_long_dir_entry *de, struct inode *dir)
{
	struct super_block *s = dir->i_sb;
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	struct page *page;
	int thislen;
	struct qnx6_long_filename *lf = qnx6_longname(s, de, &page);

	if (IS_ERR(lf))
		return 0;

	thislen = fs16_to_cpu(sbi, lf->lf_size);
	if (len != thislen) {
		qnx6_put_page(page);
		return 0;
	}
	if (memcmp(name, lf->lf_fname, len) == 0) {
		qnx6_put_page(page);
		return fs32_to_cpu(sbi, de->de_inode);
	}
	qnx6_put_page(page);
	return 0;
}

/*
 * check if the filename is correct.
 * Short-name variant; caller has already verified len == de->de_size.
 */
static unsigned qnx6_match(struct super_block *s, int len, const char *name,
			struct qnx6_dir_entry *de)
{
	struct qnx6_sb_info *sbi = QNX6_SB(s);
	if (memcmp(name, de->de_fname, len) == 0)
		return fs32_to_cpu(sbi, de->de_inode);
	return 0;
}

/* Look up `name` in directory `dir`.  Scans pages circularly starting
 * from the cached i_dir_start_lookup hint.  On success returns the inode
 * number and hands the kmapped page back through *res_page (caller must
 * qnx6_put_page() it); returns 0 with *res_page == NULL on no match.
 */
unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
			 struct page **res_page)
{
	struct super_block *s = dir->i_sb;
	struct qnx6_inode_info *ei = QNX6_I(dir);
	struct page *page = NULL;
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	unsigned ino;
	struct qnx6_dir_entry *de;
	struct qnx6_long_dir_entry *lde;

	*res_page = NULL;

	if (npages == 0)
		return 0;
	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;

	do {
		page = qnx6_get_page(dir, n);
		if (!IS_ERR(page)) {
			int limit = last_entry(dir, n);
			int i;

			de = (struct qnx6_dir_entry *)page_address(page);
			for (i = 0; i < limit; i++, de++) {
				if (len <= QNX6_SHORT_NAME_MAX) {
					/* short filename */
					if (len != de->de_size)
						continue;
					ino = qnx6_match(s, len, name, de);
					if (ino)
						goto found;
				} else if (de->de_size == 0xff) {
					/* deal with long filename */
					lde = (struct qnx6_long_dir_entry *)de;
					ino = qnx6_long_match(len,
								name, lde, dir);
					if (ino)
						goto found;
				} else
					printk(KERN_ERR "qnx6: undefined "
						"filename size in inode.\n");
			}
			qnx6_put_page(page);
		}

		if (++n >= npages)
			n = 0;
	} while (n != start);
	return 0;

found:
	*res_page = page;
	/* remember where we found it to speed up the next lookup */
	ei->i_dir_start_lookup = n;
	return ino;
}

const struct file_operations qnx6_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= qnx6_readdir,
	.fsync		= generic_file_fsync,
};

const struct inode_operations qnx6_dir_inode_operations = {
	.lookup		= qnx6_lookup,
};
gpl-2.0
cooldudezach/android_kernel_zte_nex
arch/x86/kernel/setup.c
2586
26329
/* * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * * Memory region support * David Parsons <orc@pell.chi.il.us>, July-August 1999 * * Added E820 sanitization routine (removes overlapping memory regions); * Brian Moyle <bmoyle@mvista.com>, February 2001 * * Moved CPU detection code to cpu/${cpu}.c * Patrick Mochel <mochel@osdl.org>, March 2002 * * Provisions for empty E820 memory regions (reported by certain BIOSes). * Alex Achenbach <xela@slit.de>, December 2002. * */ /* * This file handles the architecture-dependent parts of initialization */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/screen_info.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/sfi.h> #include <linux/apm_bios.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/seq_file.h> #include <linux/console.h> #include <linux/mca.h> #include <linux/root_dev.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/init.h> #include <linux/edd.h> #include <linux/iscsi_ibft.h> #include <linux/nodemask.h> #include <linux/kexec.h> #include <linux/dmi.h> #include <linux/pfn.h> #include <linux/pci.h> #include <asm/pci-direct.h> #include <linux/init_ohci1394_dma.h> #include <linux/kvm_para.h> #include <linux/dma-contiguous.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/kallsyms.h> #include <linux/cpufreq.h> #include <linux/dma-mapping.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/percpu.h> #include <linux/crash_dump.h> #include <linux/tboot.h> #include <video/edid.h> #include <asm/mtrr.h> #include <asm/apic.h> #include <asm/trampoline.h> #include <asm/e820.h> #include <asm/mpspec.h> #include <asm/setup.h> #include <asm/efi.h> #include 
<asm/timer.h> #include <asm/i8259.h> #include <asm/sections.h> #include <asm/dmi.h> #include <asm/io_apic.h> #include <asm/ist.h> #include <asm/setup_arch.h> #include <asm/bios_ebda.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/bugs.h> #include <asm/vsyscall.h> #include <asm/cpu.h> #include <asm/desc.h> #include <asm/dma.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <asm/paravirt.h> #include <asm/hypervisor.h> #include <asm/olpc_ofw.h> #include <asm/percpu.h> #include <asm/topology.h> #include <asm/apicdef.h> #include <asm/amd_nb.h> #ifdef CONFIG_X86_64 #include <asm/numa_64.h> #endif #include <asm/mce.h> #include <asm/alternative.h> #include <asm/prom.h> /* * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. * The direct mapping extends to max_pfn_mapped, so that we can directly access * apertures, ACPI and other tables without having to play with fixmaps. */ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif static __initdata unsigned long _brk_start = (unsigned long)__brk_base; unsigned long _brk_end = (unsigned long)__brk_base; #ifdef CONFIG_X86_64 int default_cpu_present_to_apicid(int mps_cpu) { return __default_cpu_present_to_apicid(mps_cpu); } int default_check_phys_apicid_present(int phys_apicid) { return __default_check_phys_apicid_present(phys_apicid); } #endif #ifndef CONFIG_DEBUG_BOOT_PARAMS struct boot_params __initdata boot_params; #else struct boot_params boot_params; #endif /* * Machine setup.. 
*/ static struct resource data_resource = { .name = "Kernel data", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource code_resource = { .name = "Kernel code", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource bss_resource = { .name = "Kernel bss", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; #ifdef CONFIG_X86_32 /* cpu data as detected by the assembly code in head.S */ struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; /* common cpu data for all cpus */ struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1}; EXPORT_SYMBOL(boot_cpu_data); static void set_mca_bus(int x) { #ifdef CONFIG_MCA MCA_bus = x; #endif } unsigned int def_to_bigsmp; /* for MCA, but anyone else can use it if they want */ unsigned int machine_id; unsigned int machine_submodel_id; unsigned int BIOS_revision; struct apm_info apm_info; EXPORT_SYMBOL(apm_info); #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) struct ist_info ist_info; EXPORT_SYMBOL(ist_info); #else struct ist_info ist_info; #endif #else struct cpuinfo_x86 boot_cpu_data __read_mostly = { .x86_phys_bits = MAX_PHYSMEM_BITS, }; EXPORT_SYMBOL(boot_cpu_data); #endif #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) unsigned long mmu_cr4_features; #else unsigned long mmu_cr4_features = X86_CR4_PAE; #endif /* Boot loader ID and version as integers, for the benefit of proc_dointvec */ int bootloader_type, bootloader_version; /* * Setup options */ struct screen_info screen_info; EXPORT_SYMBOL(screen_info); struct edid_info edid_info; EXPORT_SYMBOL_GPL(edid_info); extern int root_mountflags; unsigned long saved_video_mode; #define RAMDISK_IMAGE_START_MASK 0x07FF #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 static char __initdata command_line[COMMAND_LINE_SIZE]; #ifdef CONFIG_CMDLINE_BOOL static char __initdata 
builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; #endif #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) struct edd edd; #ifdef CONFIG_EDD_MODULE EXPORT_SYMBOL(edd); #endif /** * copy_edd() - Copy the BIOS EDD information * from boot_params into a safe place. * */ static inline void __init copy_edd(void) { memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, sizeof(edd.mbr_signature)); memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; edd.edd_info_nr = boot_params.eddbuf_entries; } #else static inline void __init copy_edd(void) { } #endif void * __init extend_brk(size_t size, size_t align) { size_t mask = align - 1; void *ret; BUG_ON(_brk_start == 0); BUG_ON(align & mask); _brk_end = (_brk_end + mask) & ~mask; BUG_ON((char *)(_brk_end + size) > __brk_limit); ret = (void *)_brk_end; _brk_end += size; memset(ret, 0, size); return ret; } #ifdef CONFIG_X86_64 static void __init init_gbpages(void) { if (direct_gbpages && cpu_has_gbpages) printk(KERN_INFO "Using GB pages for direct mapping\n"); else direct_gbpages = 0; } #else static inline void init_gbpages(void) { } static void __init cleanup_highmap(void) { } #endif static void __init reserve_brk(void) { if (_brk_end > _brk_start) memblock_reserve(__pa(_brk_start), __pa(_brk_end) - __pa(_brk_start)); /* Mark brk area as locked down and no longer taking any new allocations */ _brk_start = 0; } #ifdef CONFIG_BLK_DEV_INITRD #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) static void __init relocate_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 area_size = PAGE_ALIGN(ramdisk_size); u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; u64 ramdisk_here; unsigned long slop, clen, mapaddr; char *p, *q; /* We need to move the initrd down into lowmem */ ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, 
PAGE_SIZE); if (!ramdisk_here) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); /* Note: this includes all the lowmem currently occupied by the initrd, we rely on that fact to keep the data intact. */ memblock_reserve(ramdisk_here, area_size); initrd_start = ramdisk_here + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", ramdisk_here, ramdisk_here + ramdisk_size); q = (char *)initrd_start; /* Copy any lowmem portion of the initrd */ if (ramdisk_image < end_of_lowmem) { clen = end_of_lowmem - ramdisk_image; p = (char *)__va(ramdisk_image); memcpy(q, p, clen); q += clen; ramdisk_image += clen; ramdisk_size -= clen; } /* Copy the highmem portion of the initrd */ while (ramdisk_size) { slop = ramdisk_image & ~PAGE_MASK; clen = ramdisk_size; if (clen > MAX_MAP_CHUNK-slop) clen = MAX_MAP_CHUNK-slop; mapaddr = ramdisk_image & PAGE_MASK; p = early_memremap(mapaddr, clen+slop); memcpy(q, p+slop, clen); early_iounmap(p, clen+slop); q += clen; ramdisk_image += clen; ramdisk_size -= clen; } /* high pages is not converted by early_res_to_bootmem */ ramdisk_image = boot_params.hdr.ramdisk_image; ramdisk_size = boot_params.hdr.ramdisk_size; printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to" " %08llx - %08llx\n", ramdisk_image, ramdisk_image + ramdisk_size - 1, ramdisk_here, ramdisk_here + ramdisk_size - 1); } static void __init reserve_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; if (!boot_params.hdr.type_of_loader || !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ initrd_start = 0; if (ramdisk_size >= (end_of_lowmem>>1)) { memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); printk(KERN_ERR "initrd too large to handle, " 
"disabling initrd\n"); return; } printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image, ramdisk_end); if (ramdisk_end <= end_of_lowmem) { /* All in lowmem, easy case */ /* * don't need to reserve again, already reserved early * in i386_start_kernel */ initrd_start = ramdisk_image + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; return; } relocate_initrd(); memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); } #else static void __init reserve_initrd(void) { } #endif /* CONFIG_BLK_DEV_INITRD */ static void __init parse_setup_data(void) { struct setup_data *data; u64 pa_data; if (boot_params.hdr.version < 0x0209) return; pa_data = boot_params.hdr.setup_data; while (pa_data) { u32 data_len, map_len; map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), (u64)sizeof(struct setup_data)); data = early_memremap(pa_data, map_len); data_len = data->len + sizeof(struct setup_data); if (data_len > map_len) { early_iounmap(data, map_len); data = early_memremap(pa_data, data_len); map_len = data_len; } switch (data->type) { case SETUP_E820_EXT: parse_e820_ext(data); break; case SETUP_DTB: add_dtb(pa_data); break; default: break; } pa_data = data->next; early_iounmap(data, map_len); } } static void __init e820_reserve_setup_data(void) { struct setup_data *data; u64 pa_data; int found = 0; if (boot_params.hdr.version < 0x0209) return; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); e820_update_range(pa_data, sizeof(*data)+data->len, E820_RAM, E820_RESERVED_KERN); found = 1; pa_data = data->next; early_iounmap(data, sizeof(*data)); } if (!found) return; sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); memcpy(&e820_saved, &e820, sizeof(struct e820map)); printk(KERN_INFO "extended physical RAM map:\n"); e820_print_map("reserve setup_data"); } static void __init memblock_x86_reserve_range_setup_data(void) { struct setup_data *data; u64 pa_data; if (boot_params.hdr.version < 0x0209) return; pa_data 
= boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); memblock_reserve(pa_data, sizeof(*data) + data->len); pa_data = data->next; early_iounmap(data, sizeof(*data)); } } /* * --------- Crashkernel reservation ------------------------------ */ #ifdef CONFIG_KEXEC /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this * limit once kexec-tools are fixed. */ #ifdef CONFIG_X86_32 # define CRASH_KERNEL_ADDR_MAX (512 << 20) #else # define CRASH_KERNEL_ADDR_MAX (896 << 20) #endif static void __init reserve_crashkernel(void) { unsigned long long total_mem; unsigned long long crash_size, crash_base; int ret; total_mem = memblock_phys_mem_size(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) return; /* 0 means: find the address automatically */ if (crash_base <= 0) { const unsigned long long alignment = 16<<20; /* 16M */ /* * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, CRASH_KERNEL_ADDR_MAX, crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; } } else { unsigned long long start; start = memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1<<20); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; } } memblock_reserve(crash_base, crash_size); printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crash_base >> 20), (unsigned long)(total_mem >> 20)); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); } #else static void __init reserve_crashkernel(void) { } 
#endif /* CONFIG_KEXEC */

/*
 * Legacy PC I/O port ranges claimed unconditionally so that no driver
 * can accidentally request them (DMA/PIC/PIT/keyboard controller/FPU).
 */
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

/* Register the table above against the root ioport_resource tree. */
void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource,
				 &standard_io_resources[i]);
}

/*
 * Reserve the iBFT (iSCSI Boot Firmware Table) region, if the firmware
 * provided one, so it is not handed out as ordinary RAM.
 */
static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

/* Amount of low memory to keep away from the kernel, in bytes. */
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

/*
 * Remove known-bogus RAM entries from the E820 map: the BIOS-owned
 * low pages and the legacy BIOS area between 640K and 1M.
 */
static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
			  E820_RAM, E820_RESERVED);

	/*
	 * special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 * take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

/*
 * "reservelow=" boot parameter: override how much low memory to avoid.
 * The value is clamped to the [4K, 640K] range.
 */
static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);

/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 *
 * NOTE(review): the call ordering below is load-bearing; several steps
 * document their own ordering constraints inline (e.g. the e820 map must
 * be finalized before memblock_x86_fill(), brk must be concluded before
 * memblock allocations).  Do not reorder without checking those comments.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	/* Pull early boot parameters out of the real-mode boot_params. */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	/* Type 0xe means "extended": the real type lives in ext_loader_type. */
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* "EL32"/"EL64" signatures mark a 32- or 64-bit EFI loader. */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL32", 4)) {
		efi_enabled = 1;
		efi_64bit = false;
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4)) {
		efi_enabled = 1;
		efi_64bit = true;
	}

	if (efi_enabled && efi_memblock_x86_reserve_range())
		efi_enabled = 0;
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();
	/* update the e820_saved too */
	e820_reserve_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	finish_e820_parsing();

	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	/*
	 * Need to conclude brk, before memblock_x86_fill()
	 * it could use memblock_find_in_range, could overlap with
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock.current_limit = get_max_mapped();
	memblock_x86_fill();

	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled)
		efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

	printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
			max_pfn_mapped<<PAGE_SHIFT);

	setup_trampolines();

	init_gbpages();

	/* max_pfn_mapped is updated here */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#endif
	memblock.current_limit = get_max_mapped();
	dma_contiguous_reserve(0);

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();

	reserve_crashkernel();

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_CLOCK
	kvmclock_init();
#endif

	x86_init.paging.pagetable_setup_start(swapper_pg_dir);
	paging_init();
	x86_init.paging.pagetable_setup_done(swapper_pg_dir);

	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = read_cr4();
	}

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();

	init_cpu_to_node();

	init_apic_mappings();
	ioapic_and_gsi_init();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();
	x86_platform.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();
}

#ifdef CONFIG_X86_32

/* Legacy VGA framebuffer window (0xa0000-0xbffff). */
static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

/* Claim the video RAM window plus the standard legacy I/O ports. */
void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */
gpl-2.0
HashBang173/kernel_motorola_msm8992
tools/perf/util/config.c
2842
9941
/*
 * config.c
 *
 * Helper functions for parsing config items.
 * Originally copied from GIT source.
 *
 * Copyright (C) Linus Torvalds, 2005
 * Copyright (C) Johannes Schindelin, 2005
 *
 */
#include "util.h"
#include "cache.h"
#include "exec_cmd.h"

/* Maximum length of a fully-qualified "section.key" config name. */
#define MAXNAME (256)

#define DEBUG_CACHE_DIR ".debug"


char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */

/*
 * Parser state shared between get_next_char(), get_value() and
 * perf_parse_file().  Only one config file can be parsed at a time.
 */
static FILE *config_file;
static const char *config_file_name;
static int config_linenr;
static int config_file_eof;

static const char *config_exclusive_filename;

/*
 * Return the next character from the current config file.
 * Normalizes CRLF to LF, tracks the line number, and converts EOF
 * to '\n' (setting config_file_eof) so callers see a final newline.
 */
static int get_next_char(void)
{
	int c;
	FILE *f;

	c = '\n';
	if ((f = config_file) != NULL) {
		c = fgetc(f);
		if (c == '\r') {
			/* DOS like systems */
			c = fgetc(f);
			if (c != '\n') {
				ungetc(c, f);
				c = '\r';
			}
		}
		if (c == '\n')
			config_linenr++;
		if (c == EOF) {
			config_file_eof = 1;
			c = '\n';
		}
	}
	return c;
}

/*
 * Parse the value part of a "key = value" line into a static buffer.
 * Handles quoting, ';'/'#' comments, backslash escapes and internal
 * whitespace collapsing.  Returns NULL on syntax error or overflow;
 * the returned pointer is only valid until the next call.
 */
static char *parse_value(void)
{
	static char value[1024];
	int quote = 0, comment = 0, space = 0;
	size_t len = 0;

	for (;;) {
		int c = get_next_char();

		if (len >= sizeof(value) - 1)
			return NULL;
		if (c == '\n') {
			if (quote)
				return NULL;	/* unterminated quote */
			value[len] = 0;
			return value;
		}
		if (comment)
			continue;
		if (isspace(c) && !quote) {
			/* remember a gap; emit a single ' ' lazily below */
			space = 1;
			continue;
		}
		if (!quote) {
			if (c == ';' || c == '#') {
				comment = 1;
				continue;
			}
		}
		if (space) {
			if (len)
				value[len++] = ' ';
			space = 0;
		}
		if (c == '\\') {
			c = get_next_char();
			switch (c) {
			case '\n':	/* line continuation */
				continue;
			case 't':
				c = '\t';
				break;
			case 'b':
				c = '\b';
				break;
			case 'n':
				c = '\n';
				break;
			/* Some characters escape as themselves */
			case '\\':
			case '"':
				break;
			/* Reject unknown escape sequences */
			default:
				return NULL;
			}
			value[len++] = c;
			continue;
		}
		if (c == '"') {
			quote = 1-quote;
			continue;
		}
		value[len++] = c;
	}
}

/* Characters allowed in a config key name. */
static inline int iskeychar(int c)
{
	return isalnum(c) || c == '-' || c == '_';
}

/*
 * Read the remainder of a key (the first character and the section
 * prefix are already in name[0..len-1]), then the optional "= value",
 * and hand the pair to the callback fn.  Returns fn's result, or -1
 * on a malformed line or an over-long name.
 */
static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
{
	int c;
	char *value;

	/* Get the full name */
	for (;;) {
		c = get_next_char();
		if (config_file_eof)
			break;
		if (!iskeychar(c))
			break;
		name[len++] = c;
		if (len >= MAXNAME)
			return -1;
	}
	name[len] = 0;
	while (c == ' ' || c == '\t')
		c = get_next_char();

	value = NULL;
	if (c != '\n') {
		if (c != '=')
			return -1;
		value = parse_value();
		if (!value)
			return -1;
	}
	return fn(name, value, data);
}

/*
 * Parse the '[base "extension"]' section form; the plain base is
 * already in name[0..baselen-1].  Appends ".extension" and returns the
 * new length, or -1 on a syntax error or an over-long name.
 */
static int get_extended_base_var(char *name, int baselen, int c)
{
	do {
		if (c == '\n')
			return -1;
		c = get_next_char();
	} while (isspace(c));

	/* We require the format to be '[base "extension"]' */
	if (c != '"')
		return -1;
	name[baselen++] = '.';

	for (;;) {
		int ch = get_next_char();

		if (ch == '\n')
			return -1;
		if (ch == '"')
			break;
		if (ch == '\\') {
			ch = get_next_char();
			if (ch == '\n')
				return -1;
		}
		name[baselen++] = ch;
		if (baselen > MAXNAME / 2)
			return -1;
	}

	/* Final ']' */
	if (get_next_char() != ']')
		return -1;
	return baselen;
}

/*
 * Parse a '[section]' header (the '[' has been consumed) into name,
 * lower-casing it.  Returns the section-name length, or -1 on error.
 */
static int get_base_var(char *name)
{
	int baselen = 0;

	for (;;) {
		int c = get_next_char();
		if (config_file_eof)
			return -1;
		if (c == ']')
			return baselen;
		if (isspace(c))
			return get_extended_base_var(name, baselen, c);
		if (!iskeychar(c) && c != '.')
			return -1;
		if (baselen > MAXNAME / 2)
			return -1;
		name[baselen++] = tolower(c);
	}
}

/*
 * Main driver: read the already-opened config file (see the statics
 * above), calling fn(name, value, data) for every key.  Returns 0 on
 * success; on any syntax error it die()s with the offending line.
 */
static int perf_parse_file(config_fn_t fn, void *data)
{
	int comment = 0;
	int baselen = 0;
	static char var[MAXNAME];

	/* U+FEFF Byte Order Mark in UTF8 */
	static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
	const unsigned char *bomptr = utf8_bom;

	for (;;) {
		int c = get_next_char();
		if (bomptr && *bomptr) {
			/* We are at the file beginning; skip UTF8-encoded BOM
			 * if present. Sane editors won't put this in on their
			 * own, but e.g. Windows Notepad will do it happily. */
			if ((unsigned char) c == *bomptr) {
				bomptr++;
				continue;
			} else {
				/* Do not tolerate partial BOM. */
				if (bomptr != utf8_bom)
					break;
				/* No BOM at file beginning. Cool. */
				bomptr = NULL;
			}
		}
		if (c == '\n') {
			if (config_file_eof)
				return 0;
			comment = 0;
			continue;
		}
		if (comment || isspace(c))
			continue;
		if (c == '#' || c == ';') {
			comment = 1;
			continue;
		}
		if (c == '[') {
			baselen = get_base_var(var);
			if (baselen <= 0)
				break;
			var[baselen++] = '.';
			var[baselen] = 0;
			continue;
		}
		if (!isalpha(c))
			break;
		/* key line: first char goes after the "section." prefix */
		var[baselen] = tolower(c);
		if (get_value(fn, data, var, baselen+1) < 0)
			break;
	}
	/* only reached via break, i.e. on a parse error */
	die("bad config file line %d in %s", config_linenr, config_file_name);
}

/*
 * Apply a git-style size suffix ('k', 'm', 'g') found at *end to *val.
 * Returns 1 if the suffix (or its absence) was valid, 0 otherwise.
 */
static int parse_unit_factor(const char *end, unsigned long *val)
{
	if (!*end)
		return 1;
	else if (!strcasecmp(end, "k")) {
		*val *= 1024;
		return 1;
	}
	else if (!strcasecmp(end, "m")) {
		*val *= 1024 * 1024;
		return 1;
	}
	else if (!strcasecmp(end, "g")) {
		*val *= 1024 * 1024 * 1024;
		return 1;
	}
	return 0;
}

/*
 * Parse a long with optional k/m/g suffix into *ret.
 * Returns 1 on success, 0 on empty/NULL input or a bad suffix.
 */
static int perf_parse_long(const char *value, long *ret)
{
	if (value && *value) {
		char *end;
		long val = strtol(value, &end, 0);
		unsigned long factor = 1;
		if (!parse_unit_factor(end, &factor))
			return 0;
		*ret = val * factor;
		return 1;
	}
	return 0;
}

/* die() with the config file name when one is being parsed. */
static void die_bad_config(const char *name)
{
	if (config_file_name)
		die("bad config value for '%s' in %s", name, config_file_name);
	die("bad config value for '%s'", name);
}

/*
 * Parse a config value as an integer (with k/m/g suffix support);
 * die()s on malformed input rather than returning an error.
 */
int perf_config_int(const char *name, const char *value)
{
	long ret = 0;
	if (!perf_parse_long(value, &ret))
		die_bad_config(name);
	return ret;
}

/*
 * Interpret value as a boolean ("true"/"yes"/"on", "false"/"no"/"off",
 * a bare key meaning true, "" meaning false) or fall back to an
 * integer.  *is_bool reports which interpretation was used.
 */
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
	*is_bool = 1;
	if (!value)
		return 1;	/* "[section] key" with no '=' means true */
	if (!*value)
		return 0;
	if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on"))
		return 1;
	if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
		return 0;
	*is_bool = 0;
	return perf_config_int(name, value);
}

/* Boolean config value, normalized to 0/1. */
int perf_config_bool(const char *name, const char *value)
{
	int discard;
	return !!perf_config_bool_or_int(name, value, &discard);
}

/* Directory-valued config item: just pass the value through. */
const char *perf_config_dirname(const char *name, const char *value)
{
	if (!name)
		return NULL;
	return value;
}
static int perf_default_core_config(const char *var __maybe_unused, const char *value __maybe_unused) { /* Add other config variables here. */ return 0; } int perf_default_config(const char *var, const char *value, void *dummy __maybe_unused) { if (!prefixcmp(var, "core.")) return perf_default_core_config(var, value); /* Add other config variables here. */ return 0; } static int perf_config_from_file(config_fn_t fn, const char *filename, void *data) { int ret; FILE *f = fopen(filename, "r"); ret = -1; if (f) { config_file = f; config_file_name = filename; config_linenr = 1; config_file_eof = 0; ret = perf_parse_file(fn, data); fclose(f); config_file_name = NULL; } return ret; } static const char *perf_etc_perfconfig(void) { static const char *system_wide; if (!system_wide) system_wide = system_path(ETC_PERFCONFIG); return system_wide; } static int perf_env_bool(const char *k, int def) { const char *v = getenv(k); return v ? perf_config_bool(k, v) : def; } static int perf_config_system(void) { return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); } static int perf_config_global(void) { return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); } int perf_config(config_fn_t fn, void *data) { int ret = 0, found = 0; const char *home = NULL; /* Setting $PERF_CONFIG makes perf read _only_ the given config file. 
*/ if (config_exclusive_filename) return perf_config_from_file(fn, config_exclusive_filename, data); if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { ret += perf_config_from_file(fn, perf_etc_perfconfig(), data); found += 1; } home = getenv("HOME"); if (perf_config_global() && home) { char *user_config = strdup(mkpath("%s/.perfconfig", home)); struct stat st; if (user_config == NULL) { warning("Not enough memory to process %s/.perfconfig, " "ignoring it.", home); goto out; } if (stat(user_config, &st) < 0) goto out_free; if (st.st_uid && (st.st_uid != geteuid())) { warning("File %s not owned by current user or root, " "ignoring it.", user_config); goto out_free; } if (!st.st_size) goto out_free; ret += perf_config_from_file(fn, user_config, data); found += 1; out_free: free(user_config); } out: if (found == 0) return -1; return ret; } /* * Call this to report error for your variable that should not * get a boolean value (i.e. "[my] var" means "true"). */ int config_error_nonbool(const char *var) { return error("Missing value for '%s'", var); } struct buildid_dir_config { char *dir; }; static int buildid_dir_command_config(const char *var, const char *value, void *data) { struct buildid_dir_config *c = data; const char *v; /* same dir for all commands */ if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) { v = perf_config_dirname(var, value); if (!v) return -1; strncpy(c->dir, v, MAXPATHLEN-1); c->dir[MAXPATHLEN-1] = '\0'; } return 0; } static void check_buildid_dir_config(void) { struct buildid_dir_config c; c.dir = buildid_dir; perf_config(buildid_dir_command_config, &c); } void set_buildid_dir(void) { buildid_dir[0] = '\0'; /* try config file */ check_buildid_dir_config(); /* default to $HOME/.debug */ if (buildid_dir[0] == '\0') { char *v = getenv("HOME"); if (v) { snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s", v, DEBUG_CACHE_DIR); } else { strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1); } buildid_dir[MAXPATHLEN-1] = '\0'; } /* for 
communicating with external commands */ setenv("PERF_BUILDID_DIR", buildid_dir, 1); }
gpl-2.0