/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "vcd_ddl.h"
#include "vcd_ddl_metadata.h"
#include "vcd_res_tracker_api.h"
static u32 ddl_set_dec_property(struct ddl_client_context *pddl,
struct vcd_property_hdr *property_hdr, void *property_value);
static u32 ddl_set_enc_property(struct ddl_client_context *pddl,
struct vcd_property_hdr *property_hdr, void *property_value);
static u32 ddl_get_dec_property(struct ddl_client_context *pddl,
struct vcd_property_hdr *property_hdr, void *property_value);
static u32 ddl_get_enc_property(struct ddl_client_context *pddl,
struct vcd_property_hdr *property_hdr, void *property_value);
static u32 ddl_set_enc_dynamic_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr, void *property_value);
static void ddl_set_default_enc_property(struct ddl_client_context *ddl);
static void ddl_set_default_enc_profile(
struct ddl_encoder_data *encoder);
static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder);
static void ddl_set_default_enc_vop_timing(
struct ddl_encoder_data *encoder);
static void ddl_set_default_enc_intra_period(
struct ddl_encoder_data *encoder);
static void ddl_set_default_enc_rc_params(
struct ddl_encoder_data *encoder);
static u32 ddl_valid_buffer_requirement(
struct vcd_buffer_requirement *original_buf_req,
struct vcd_buffer_requirement *req_buf_req);
static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder);
static u32 ddl_set_dec_buffers(struct ddl_decoder_data *decoder,
struct ddl_property_dec_pic_buffers *dpb);
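/*
 * ddl_set_property() - top-level property setter.
 * Validates the arguments and the DDL context/handle, then dispatches to
 * the decoder or encoder specific setter based on the session type.
 */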
u32 ddl_set_property(u32 *ddl_handle,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_context *ddl_context;
struct ddl_client_context *ddl =
(struct ddl_client_context *) ddl_handle;
u32 vcd_status;
DDL_MSG_HIGH("ddl_set_property");
if (!property_hdr || !property_value) {
DDL_MSG_ERROR("ddl_set_prop:Bad_argument");
return VCD_ERR_ILLEGAL_PARM;
}
ddl_context = ddl_get_context();
if (!DDL_IS_INITIALIZED(ddl_context)) {
DDL_MSG_ERROR("ddl_set_prop:Not_inited");
return VCD_ERR_ILLEGAL_OP;
}
if (!ddl) {
DDL_MSG_ERROR("ddl_set_prop:Bad_handle");
return VCD_ERR_BAD_HANDLE;
}
if (ddl->decoding)
vcd_status = ddl_set_dec_property(ddl, property_hdr,
property_value);
else
vcd_status = ddl_set_enc_property(ddl, property_hdr,
property_value);
if (vcd_status)
DDL_MSG_ERROR("ddl_set_prop:FAILED");
return vcd_status;
}
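/*
 * ddl_get_property() - top-level property getter.
 * DDL_I_CAPABILITY is answered directly from compile-time limits before
 * the context/handle checks; everything else is routed to the decoder or
 * encoder specific getter.
 */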
u32 ddl_get_property(u32 *ddl_handle,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_context *ddl_context;
struct ddl_client_context *ddl =
(struct ddl_client_context *) ddl_handle;
u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
DDL_MSG_HIGH("ddl_get_property");
if (!property_hdr || !property_value)
return VCD_ERR_ILLEGAL_PARM;
if (property_hdr->prop_id == DDL_I_CAPABILITY) {
if (sizeof(struct ddl_property_capability) ==
property_hdr->sz) {
struct ddl_property_capability *ddl_capability =
(struct ddl_property_capability *)
property_value;
ddl_capability->max_num_client = VCD_MAX_NO_CLIENT;
ddl_capability->exclusive = VCD_COMMAND_EXCLUSIVE;
ddl_capability->frame_command_depth =
VCD_FRAME_COMMAND_DEPTH;
ddl_capability->general_command_depth =
VCD_GENEVIDC_COMMAND_DEPTH;
ddl_capability->ddl_time_out_in_ms =
DDL_HW_TIMEOUT_IN_MS;
vcd_status = VCD_S_SUCCESS;
}
return vcd_status;
}
ddl_context = ddl_get_context();
if (!DDL_IS_INITIALIZED(ddl_context))
return VCD_ERR_ILLEGAL_OP;
if (!ddl)
return VCD_ERR_BAD_HANDLE;
if (ddl->decoding)
vcd_status = ddl_get_dec_property(ddl, property_hdr,
property_value);
else
vcd_status = ddl_get_enc_property(ddl, property_hdr,
property_value);
if (vcd_status)
DDL_MSG_ERROR("ddl_get_prop:FAILED");
else
DDL_MSG_MED("ddl_get_prop:SUCCESS");
return vcd_status;
}
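/*
 * Returns true when the decoder client has enough state to start: a codec
 * must be selected, and either a sequence header or a client-supplied
 * frame size must be available.
 */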
u32 ddl_decoder_ready_to_start(struct ddl_client_context *ddl,
struct vcd_sequence_hdr *header)
{
struct ddl_decoder_data *decoder =
&(ddl->codec_data.decoder);
if (!decoder->codec.codec) {
DDL_MSG_ERROR("ddl_dec_start_check:Codec_not_set");
return false;
}
if ((!header) && (!decoder->client_frame_size.height ||
!decoder->client_frame_size.width)) {
DDL_MSG_ERROR("ddl_dec_start_check:"
"Client_height_width_default");
return false;
}
return true;
}
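/*
 * Returns true when the encoder client is fully configured: codec, frame
 * size, frame rate and target bitrate must be set, the VOP time resolution
 * must be at least the frame rate, CABAC is rejected for H.264 baseline,
 * and LTR is only allowed for H.264 without B frames in non-manual mode.
 */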
u32 ddl_encoder_ready_to_start(struct ddl_client_context *ddl)
{
struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
if (!encoder->codec.codec || !encoder->frame_size.height ||
!encoder->frame_size.width ||
!encoder->frame_rate.fps_denominator ||
!encoder->frame_rate.fps_numerator ||
!encoder->target_bit_rate.target_bitrate)
return false;
if (encoder->frame_rate.fps_numerator >
(encoder->frame_rate.fps_denominator *
encoder->vop_timing.vop_time_resolution)) {
DDL_MSG_ERROR("ResVsFrameRateFailed!");
return false;
}
if (encoder->profile.profile == VCD_PROFILE_H264_BASELINE &&
encoder->entropy_control.entropy_sel == VCD_ENTROPY_SEL_CABAC) {
DDL_MSG_ERROR("H264BaseLineCABAC!!");
return false;
}
if (DDL_IS_LTR_ENABLED(encoder)) {
if ((encoder->codec.codec != VCD_CODEC_H264) ||
(encoder->i_period.b_frames)) {
DDL_MSG_ERROR("%s: Only support LTR encoding "\
"for H264 without B frame. Current "\
"codec %d, B-frame %d", __func__,
encoder->codec.codec,
encoder->i_period.b_frames);
return false;
}
if (encoder->ltr_control.ltrmode.ltr_mode ==
VCD_LTR_MODE_MANUAL) {
DDL_MSG_ERROR("%s: Manual LTR mode not supported!",
__func__);
return false;
}
DDL_MSG_HIGH("%s: LTR: mode = %u, count = %u, period = %u",
__func__, (u32)encoder->ltr_control.ltrmode.ltr_mode,
encoder->ltr_control.ltr_count,
encoder->ltr_control.ltr_period);
}
return true;
}
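/*
 * Decoder property setter. Most properties are accepted only in the
 * DDL_CLIENT_OPEN state (or the DPB/INITCODEC wait states for buffer
 * related properties) and only when property_hdr->sz matches the
 * expected structure size.
 */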
static u32 ddl_set_dec_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
	u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
switch (property_hdr->prop_id) {
case DDL_I_DPB_RELEASE:
if ((sizeof(struct ddl_frame_data_tag) ==
property_hdr->sz) &&
(decoder->dp_buf.no_of_dec_pic_buf))
vcd_status = ddl_decoder_dpb_transact(decoder,
(struct ddl_frame_data_tag *)
property_value, DDL_DPB_OP_MARK_FREE);
break;
case DDL_I_DPB:
{
struct ddl_property_dec_pic_buffers *dpb =
(struct ddl_property_dec_pic_buffers *) property_value;
if ((sizeof(struct ddl_property_dec_pic_buffers) ==
property_hdr->sz) &&
(DDLCLIENT_STATE_IS(ddl,
DDL_CLIENT_WAIT_FOR_INITCODEC) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) &&
(dpb->no_of_dec_pic_buf ==
decoder->client_output_buf_req.actual_count))
vcd_status = ddl_set_dec_buffers(decoder, dpb);
}
break;
case DDL_I_REQ_OUTPUT_FLUSH:
if (sizeof(u32) == property_hdr->sz) {
decoder->dynamic_prop_change |=
DDL_DEC_REQ_OUTPUT_FLUSH;
decoder->dpb_mask.client_mask = 0;
decoder->field_needed_for_prev_ip = 0;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_INPUT_BUF_REQ:
{
struct vcd_buffer_requirement *buffer_req =
(struct vcd_buffer_requirement *)property_value;
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz &&
(DDLCLIENT_STATE_IS(ddl,
DDL_CLIENT_WAIT_FOR_INITCODEC) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
(ddl_valid_buffer_requirement(
&decoder->min_input_buf_req, buffer_req))) {
decoder->client_input_buf_req = *buffer_req;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case DDL_I_OUTPUT_BUF_REQ:
{
struct vcd_buffer_requirement *buffer_req =
(struct vcd_buffer_requirement *)property_value;
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz &&
(DDLCLIENT_STATE_IS(ddl,
DDL_CLIENT_WAIT_FOR_INITCODEC) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
(ddl_valid_buffer_requirement(
&decoder->min_output_buf_req, buffer_req))) {
decoder->client_output_buf_req =
*buffer_req;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_CODEC:
{
struct vcd_property_codec *codec =
(struct vcd_property_codec *)property_value;
if (sizeof(struct vcd_property_codec) ==
property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
ddl_codec_type_transact(ddl, false,
codec->codec)) {
if (decoder->codec.codec != codec->codec) {
decoder->codec = *codec;
ddl_set_default_dec_property(ddl);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_POST_FILTER:
if (sizeof(struct vcd_property_post_filter) ==
property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && (
decoder->codec.codec == VCD_CODEC_MPEG4 ||
decoder->codec.codec == VCD_CODEC_MPEG2)) {
decoder->post_filter =
*(struct vcd_property_post_filter *)
property_value;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_FRAME_SIZE:
{
struct vcd_property_frame_size *frame_size =
(struct vcd_property_frame_size *) property_value;
if ((sizeof(struct vcd_property_frame_size) ==
property_hdr->sz) &&
(DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) &&
(DDL_ALLOW_DEC_FRAMESIZE(frame_size->width,
frame_size->height))) {
if (decoder->client_frame_size.height !=
frame_size->height ||
decoder->client_frame_size.width !=
frame_size->width) {
decoder->client_frame_size = *frame_size;
ddl_set_default_decoder_buffer_req(decoder,
true);
}
DDL_MSG_LOW("set VCD_I_FRAME_SIZE width = %d"
" height = %d\n",
frame_size->width, frame_size->height);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_SET_TURBO_CLK:
{
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_BUFFER_FORMAT:
{
struct vcd_property_buffer_format *tile =
(struct vcd_property_buffer_format *)
property_value;
if (sizeof(struct vcd_property_buffer_format) ==
property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
tile->buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
if (tile->buffer_format !=
decoder->buf_format.buffer_format) {
decoder->buf_format = *tile;
ddl_set_default_decoder_buffer_req(
decoder, true);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_SET_EXT_METABUFFER:
{
int index, buffer_size;
u8 *phys_addr;
u8 *virt_addr;
struct vcd_property_meta_buffer *meta_buffer =
(struct vcd_property_meta_buffer *) property_value;
DDL_MSG_LOW("Entered VCD_I_SET_EXT_METABUFFER Virt: %p,"\
"Phys %p, fd: %d size: %d count: %d",
meta_buffer->kernel_virtual_addr,
meta_buffer->physical_addr,
meta_buffer->pmem_fd,
meta_buffer->size, meta_buffer->count);
if ((property_hdr->sz == sizeof(struct
vcd_property_meta_buffer)) &&
(DDLCLIENT_STATE_IS(ddl,
DDL_CLIENT_WAIT_FOR_INITCODEC) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN))) {
phys_addr = meta_buffer->dev_addr;
virt_addr = meta_buffer->kernel_virtual_addr;
buffer_size = meta_buffer->size/meta_buffer->count;
for (index = 0; index < meta_buffer->count; index++) {
ddl->codec_data.decoder.hw_bufs.
meta_hdr[index].align_physical_addr
= phys_addr;
ddl->codec_data.decoder.hw_bufs.
meta_hdr[index].align_virtual_addr
= virt_addr;
ddl->codec_data.decoder.hw_bufs.
meta_hdr[index].buffer_size
= buffer_size;
ddl->codec_data.decoder.hw_bufs.
meta_hdr[index].physical_base_addr
= phys_addr;
ddl->codec_data.decoder.hw_bufs.
meta_hdr[index].virtual_base_addr
= virt_addr;
DDL_MSG_LOW("Meta Buffer: "\
"Assigned %d buffer for "
"virt: %p, phys %p for "
"meta_buffers "
"of size: %d\n",
index, virt_addr,
phys_addr, buffer_size);
phys_addr += buffer_size;
virt_addr += buffer_size;
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_H264_MV_BUFFER:
{
int index, buffer_size;
u8 *phys_addr;
u8 *virt_addr;
struct vcd_property_h264_mv_buffer *mv_buff =
(struct vcd_property_h264_mv_buffer *)
property_value;
DDL_MSG_LOW("Entered VCD_I_H264_MV_BUFFER Virt: %p, Phys %p,"
"fd: %d size: %d count: %d\n",
mv_buff->kernel_virtual_addr,
mv_buff->physical_addr,
mv_buff->pmem_fd,
mv_buff->size, mv_buff->count);
if ((property_hdr->sz == sizeof(struct
vcd_property_h264_mv_buffer)) &&
(DDLCLIENT_STATE_IS(ddl,
DDL_CLIENT_WAIT_FOR_INITCODEC) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN))) {
phys_addr = mv_buff->dev_addr;
virt_addr = mv_buff->kernel_virtual_addr;
buffer_size = mv_buff->size/mv_buff->count;
for (index = 0; index < mv_buff->count; index++) {
ddl->codec_data.decoder.hw_bufs.
h264_mv[index].align_physical_addr
= phys_addr;
ddl->codec_data.decoder.hw_bufs.
h264_mv[index].align_virtual_addr
= virt_addr;
ddl->codec_data.decoder.hw_bufs.
h264_mv[index].buffer_size
= buffer_size;
ddl->codec_data.decoder.hw_bufs.
h264_mv[index].physical_base_addr
= phys_addr;
ddl->codec_data.decoder.hw_bufs.
h264_mv[index].virtual_base_addr
= virt_addr;
DDL_MSG_LOW("Assigned %d buffer for "
"virt: %p, phys %p for "
"h264_mv_buffers "
"of size: %d\n",
index, virt_addr,
phys_addr, buffer_size);
phys_addr += buffer_size;
virt_addr += buffer_size;
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_FREE_H264_MV_BUFFER:
{
memset(&decoder->hw_bufs.h264_mv, 0, sizeof(struct
ddl_buf_addr) * DDL_MAX_BUFFER_COUNT);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_FREE_EXT_METABUFFER:
{
memset(&decoder->hw_bufs.meta_hdr, 0, sizeof(struct
ddl_buf_addr) * DDL_MAX_BUFFER_COUNT);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_OUTPUT_ORDER:
{
if (sizeof(u32) == property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
decoder->output_order =
*(u32 *)property_value;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_DEC_PICTYPE:
{
if ((sizeof(u32) == property_hdr->sz) &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
decoder->idr_only_decoding =
*(u32 *)property_value;
ddl_set_default_decoder_buffer_req(
decoder, true);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_METADATA_ENABLE:
case VCD_I_METADATA_HEADER:
DDL_MSG_MED("Meta Data Interface is Requested");
vcd_status = ddl_set_metadata_params(ddl, property_hdr,
property_value);
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_FRAME_RATE:
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_CONT_ON_RECONFIG:
{
DDL_MSG_LOW("Set property VCD_I_CONT_ON_RECONFIG\n");
if (sizeof(u32) == property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
decoder->cont_mode = *(u32 *)property_value;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_DISABLE_DMX:
{
int disable_dmx_allowed = 0;
DDL_MSG_LOW("Set property VCD_I_DISABLE_DMX\n");
if (res_trk_get_disable_dmx() &&
((decoder->codec.codec == VCD_CODEC_H264) ||
(decoder->codec.codec == VCD_CODEC_VC1) ||
(decoder->codec.codec == VCD_CODEC_VC1_RCV)))
disable_dmx_allowed = 1;
if (sizeof(u32) == property_hdr->sz &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
disable_dmx_allowed) {
decoder->dmx_disable = *(u32 *)property_value;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_REQ_PERF_LEVEL:
vcd_status = VCD_S_SUCCESS;
break;
default:
vcd_status = VCD_ERR_ILLEGAL_OP;
break;
}
return vcd_status;
}
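/*
 * Validates an encoder codec/profile/level combination. Returns true when
 * the requested level falls inside the range supported for the selected
 * profile (MPEG-4 SP/ASP, H.264, H.263).
 */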
static u32 ddl_check_valid_enc_level(struct vcd_property_codec *codec,
struct vcd_property_profile *profile,
struct vcd_property_level *level)
{
u32 status = false;
if (codec && profile && level) {
switch (codec->codec) {
case VCD_CODEC_MPEG4:
status = (profile->profile ==
VCD_PROFILE_MPEG4_SP) &&
(level->level >= VCD_LEVEL_MPEG4_0) &&
(level->level <= VCD_LEVEL_MPEG4_6) &&
(VCD_LEVEL_MPEG4_3b != level->level);
status = status ||
((profile->profile ==
VCD_PROFILE_MPEG4_ASP) &&
(level->level >= VCD_LEVEL_MPEG4_0) &&
(level->level <= VCD_LEVEL_MPEG4_5));
break;
case VCD_CODEC_H264:
status = (level->level >= VCD_LEVEL_H264_1) &&
(level->level <= VCD_LEVEL_H264_4);
break;
case VCD_CODEC_H263:
status = (level->level >= VCD_LEVEL_H263_10) &&
(level->level <= VCD_LEVEL_H263_70);
break;
default:
break;
}
}
return status;
}
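/*
 * Encoder property setter. Dynamic properties (bitrate, frame rate,
 * i-period, intra refresh, LTR use, ...) are tried first via
 * ddl_set_enc_dynamic_property(); the remaining properties are accepted
 * only while the client is in the DDL_CLIENT_OPEN state.
 */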
static u32 ddl_set_enc_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr,
void *property_value)
{
struct ddl_encoder_data *encoder =
&(ddl->codec_data.encoder);
u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE) ||
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
vcd_status = ddl_set_enc_dynamic_property(ddl,
property_hdr, property_value);
}
if (vcd_status) {
if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) ||
vcd_status != VCD_ERR_ILLEGAL_OP) {
DDL_MSG_ERROR("ddl_set_enc_property:"
"Fails_as_not_in_open_state");
return VCD_ERR_ILLEGAL_OP;
}
} else
return vcd_status;
switch (property_hdr->prop_id) {
case VCD_I_FRAME_SIZE:
{
struct vcd_property_frame_size *frame_size =
(struct vcd_property_frame_size *) property_value;
if ((sizeof(struct vcd_property_frame_size) ==
property_hdr->sz) &&
(DDL_ALLOW_ENC_FRAMESIZE(frame_size->width,
frame_size->height))) {
if (encoder->frame_size.height != frame_size->height ||
encoder->frame_size.width !=
frame_size->width) {
ddl_calculate_stride(frame_size, false);
encoder->frame_size = *frame_size;
ddl_set_default_encoder_buffer_req(encoder);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_CODEC:
{
struct vcd_property_codec *codec =
(struct vcd_property_codec *) property_value;
if ((sizeof(struct vcd_property_codec) ==
property_hdr->sz) &&
(ddl_codec_type_transact(ddl, false, codec->codec))) {
if (codec->codec != encoder->codec.codec) {
encoder->codec = *codec;
ddl_set_default_enc_property(ddl);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_REQ_IFRAME:
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_INTRA_PERIOD:
{
struct vcd_property_i_period *i_period =
(struct vcd_property_i_period *)property_value;
if (sizeof(struct vcd_property_i_period) ==
property_hdr->sz &&
i_period->b_frames <= DDL_MAX_NUM_OF_B_FRAME) {
encoder->i_period = *i_period;
encoder->client_input_buf_req.min_count =
i_period->b_frames + 1;
encoder->client_input_buf_req.actual_count =
DDL_MAX(encoder->client_input_buf_req.\
actual_count, encoder->\
client_input_buf_req.min_count);
encoder->client_output_buf_req.min_count =
i_period->b_frames + 2;
encoder->client_output_buf_req.actual_count =
DDL_MAX(encoder->client_output_buf_req.\
actual_count, encoder->\
client_output_buf_req.min_count);
ddl->extra_output_buf_count =
i_period->b_frames - 1;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_PROFILE:
{
struct vcd_property_profile *profile =
(struct vcd_property_profile *)property_value;
if ((sizeof(struct vcd_property_profile) ==
property_hdr->sz) && ((
(encoder->codec.codec == VCD_CODEC_MPEG4) && (
profile->profile == VCD_PROFILE_MPEG4_SP ||
profile->profile == VCD_PROFILE_MPEG4_ASP)) ||
((encoder->codec.codec == VCD_CODEC_H264) &&
(profile->profile >= VCD_PROFILE_H264_BASELINE) &&
(profile->profile <= VCD_PROFILE_H264_HIGH)) ||
((encoder->codec.codec == VCD_CODEC_H263) &&
(profile->profile == VCD_PROFILE_H263_BASELINE)))) {
encoder->profile = *profile;
vcd_status = VCD_S_SUCCESS;
if (profile->profile == VCD_PROFILE_H264_BASELINE)
encoder->entropy_control.entropy_sel =
VCD_ENTROPY_SEL_CAVLC;
else
encoder->entropy_control.entropy_sel =
VCD_ENTROPY_SEL_CABAC;
}
}
break;
case VCD_I_LEVEL:
{
struct vcd_property_level *level =
(struct vcd_property_level *) property_value;
if ((sizeof(struct vcd_property_level) ==
property_hdr->sz) && (ddl_check_valid_enc_level
(&encoder->codec,
&encoder->profile, level))) {
encoder->level = *level;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_MULTI_SLICE:
{
struct vcd_property_multi_slice *multi_slice =
(struct vcd_property_multi_slice *)
property_value;
DDL_MSG_HIGH("VCD_I_MULTI_SLICE eMSliceSel %d "\
"nMSliceSize %d Tot#of MB %d "\
"encoder->frame_size.width = %d "\
"encoder->frame_size.height = %d",
(int)multi_slice->m_slice_sel,
multi_slice->m_slice_size,
DDL_NO_OF_MB(encoder->frame_size.width,
encoder->frame_size.height),
encoder->frame_size.width,
encoder->frame_size.height);
switch (multi_slice->m_slice_sel) {
case VCD_MSLICE_OFF:
vcd_status = VCD_S_SUCCESS;
break;
case VCD_MSLICE_BY_GOB:
if (encoder->codec.codec == VCD_CODEC_H263)
vcd_status = VCD_S_SUCCESS;
break;
case VCD_MSLICE_BY_MB_COUNT:
{
if ((multi_slice->m_slice_size >= 1) &&
(multi_slice->m_slice_size <=
DDL_NO_OF_MB(encoder->frame_size.width,
encoder->frame_size.height))) {
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_MSLICE_BY_BYTE_COUNT:
if (multi_slice->m_slice_size > 0)
vcd_status = VCD_S_SUCCESS;
break;
default:
break;
}
if (sizeof(struct vcd_property_multi_slice) ==
property_hdr->sz && !vcd_status) {
encoder->multi_slice = *multi_slice;
if (multi_slice->m_slice_sel == VCD_MSLICE_OFF)
encoder->multi_slice.m_slice_size = 0;
}
}
break;
case VCD_I_RATE_CONTROL:
{
struct vcd_property_rate_control *rate_control =
(struct vcd_property_rate_control *)
property_value;
if (sizeof(struct vcd_property_rate_control) ==
property_hdr->sz &&
rate_control->rate_control >=
VCD_RATE_CONTROL_OFF &&
rate_control->rate_control <=
VCD_RATE_CONTROL_CBR_CFR) {
encoder->rc = *rate_control;
ddl_set_default_enc_rc_params(encoder);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_SHORT_HEADER:
if (sizeof(struct vcd_property_short_header) ==
property_hdr->sz &&
encoder->codec.codec ==
VCD_CODEC_MPEG4) {
encoder->short_header =
*(struct vcd_property_short_header *)
property_value;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_VOP_TIMING:
{
struct vcd_property_vop_timing *vop_time =
(struct vcd_property_vop_timing *)
property_value;
if ((sizeof(struct vcd_property_vop_timing) ==
property_hdr->sz) &&
(encoder->frame_rate.fps_numerator <=
vop_time->vop_time_resolution) &&
(encoder->codec.codec == VCD_CODEC_MPEG4)) {
encoder->vop_timing = *vop_time;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_HEADER_EXTENSION:
if (sizeof(u32) == property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_MPEG4) {
encoder->hdr_ext_control = *(u32 *)property_value;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_ENTROPY_CTRL:
{
struct vcd_property_entropy_control *entropy_control =
(struct vcd_property_entropy_control *)
property_value;
if (sizeof(struct vcd_property_entropy_control) ==
property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_H264 &&
entropy_control->entropy_sel >=
VCD_ENTROPY_SEL_CAVLC &&
entropy_control->entropy_sel <=
VCD_ENTROPY_SEL_CABAC) {
if ((entropy_control->entropy_sel ==
VCD_ENTROPY_SEL_CABAC) &&
(encoder->entropy_control.cabac_model ==
VCD_CABAC_MODEL_NUMBER_1 ||
encoder->entropy_control.cabac_model ==
VCD_CABAC_MODEL_NUMBER_2)) {
vcd_status = VCD_ERR_ILLEGAL_PARM;
} else {
encoder->entropy_control = *entropy_control;
vcd_status = VCD_S_SUCCESS;
}
}
}
break;
case VCD_I_DEBLOCKING:
{
struct vcd_property_db_config *db_config =
(struct vcd_property_db_config *) property_value;
if (sizeof(struct vcd_property_db_config) ==
property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_H264 &&
db_config->db_config >=
VCD_DB_ALL_BLOCKING_BOUNDARY &&
db_config->db_config <=
VCD_DB_SKIP_SLICE_BOUNDARY) {
encoder->db_control = *db_config;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_QP_RANGE:
{
struct vcd_property_qp_range *qp =
(struct vcd_property_qp_range *)property_value;
if ((sizeof(struct vcd_property_qp_range) ==
property_hdr->sz) && (qp->min_qp <=
qp->max_qp) && ((encoder->codec.codec ==
VCD_CODEC_H264 && qp->max_qp <= DDL_MAX_H264_QP) ||
(qp->max_qp <= DDL_MAX_MPEG4_QP))) {
encoder->qp_range = *qp;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_SESSION_QP:
{
struct vcd_property_session_qp *qp =
(struct vcd_property_session_qp *)property_value;
if ((sizeof(struct vcd_property_session_qp) ==
property_hdr->sz) &&
(qp->i_frame_qp >= encoder->qp_range.min_qp) &&
(qp->i_frame_qp <= encoder->qp_range.max_qp) &&
(qp->p_frame_qp >= encoder->qp_range.min_qp) &&
(qp->p_frame_qp <= encoder->qp_range.max_qp)) {
encoder->session_qp = *qp;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_RC_LEVEL_CONFIG:
{
struct vcd_property_rc_level *rc_level =
(struct vcd_property_rc_level *) property_value;
if (sizeof(struct vcd_property_rc_level) ==
property_hdr->sz &&
		(encoder->rc.rate_control >=
		VCD_RATE_CONTROL_VBR_VFR &&
		encoder->rc.rate_control <=
		VCD_RATE_CONTROL_CBR_VFR) &&
(!rc_level->mb_level_rc ||
encoder->codec.codec == VCD_CODEC_H264)) {
encoder->rc_level = *rc_level;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_FRAME_LEVEL_RC:
{
struct vcd_property_frame_level_rc_params
*frame_level_rc =
(struct vcd_property_frame_level_rc_params *)
property_value;
if ((sizeof(struct vcd_property_frame_level_rc_params) ==
property_hdr->sz) &&
(frame_level_rc->reaction_coeff) &&
(encoder->rc_level.frame_level_rc)) {
encoder->frame_level_rc = *frame_level_rc;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_ADAPTIVE_RC:
if ((sizeof(struct vcd_property_adaptive_rc_params) ==
property_hdr->sz) &&
(encoder->codec.codec == VCD_CODEC_H264) &&
(encoder->rc_level.mb_level_rc)) {
encoder->adaptive_rc =
*(struct vcd_property_adaptive_rc_params *)
property_value;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_BUFFER_FORMAT:
{
struct vcd_property_buffer_format *buffer_format =
(struct vcd_property_buffer_format *)
property_value;
if (sizeof(struct vcd_property_buffer_format) ==
property_hdr->sz &&
((buffer_format->buffer_format ==
VCD_BUFFER_FORMAT_NV12_16M2KA) ||
(VCD_BUFFER_FORMAT_TILE_4x2 ==
buffer_format->buffer_format))) {
if (buffer_format->buffer_format !=
encoder->buf_format.buffer_format) {
encoder->buf_format = *buffer_format;
ddl_set_default_encoder_buffer_req(encoder);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case DDL_I_INPUT_BUF_REQ:
{
struct vcd_buffer_requirement *buffer_req =
(struct vcd_buffer_requirement *)property_value;
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz && (ddl_valid_buffer_requirement(
&encoder->input_buf_req, buffer_req))) {
encoder->client_input_buf_req = *buffer_req;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case DDL_I_OUTPUT_BUF_REQ:
{
struct vcd_buffer_requirement *buffer_req =
(struct vcd_buffer_requirement *)property_value;
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz && (ddl_valid_buffer_requirement(
&encoder->output_buf_req, buffer_req))) {
encoder->client_output_buf_req = *buffer_req;
encoder->client_output_buf_req.sz =
DDL_ALIGN(buffer_req->sz,
DDL_KILO_BYTE(4));
DDL_MSG_LOW("%s encoder->client_output_buf_req.sz"
" = %d\n", __func__,
encoder->client_output_buf_req.sz);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_RECON_BUFFERS:
{
int index, index_hw_bufs = -1;
struct vcd_property_enc_recon_buffer *recon_buffers =
(struct vcd_property_enc_recon_buffer *)property_value;
for (index = 0; index < 4; index++) {
if (!encoder->hw_bufs.dpb_y[index].
align_physical_addr) {
index_hw_bufs = index;
break;
} else
continue;
}
if (index_hw_bufs == -1) {
DDL_MSG_HIGH("ERROR: value of index_hw_bufs");
vcd_status = VCD_ERR_ILLEGAL_PARM;
} else {
if (property_hdr->sz == sizeof(struct
vcd_property_enc_recon_buffer)) {
encoder->hw_bufs.dpb_y[index_hw_bufs].
align_physical_addr =
recon_buffers->dev_addr;
encoder->hw_bufs.dpb_y[index_hw_bufs].
align_virtual_addr =
recon_buffers->kernel_virtual_addr;
encoder->hw_bufs.dpb_y[index_hw_bufs].
buffer_size = recon_buffers->buffer_size;
encoder->hw_bufs.dpb_c[index_hw_bufs].
align_physical_addr =
recon_buffers->dev_addr +
ddl_get_yuv_buf_size(
encoder->frame_size.width,
encoder->frame_size.height,
DDL_YUV_BUF_TYPE_TILE);
encoder->hw_bufs.dpb_c[index_hw_bufs].
align_virtual_addr =
recon_buffers->kernel_virtual_addr +
recon_buffers->ysize;
DDL_MSG_LOW("Y::KVirt: %p,KPhys: %p"
"UV::KVirt: %p,KPhys: %p\n",
encoder->hw_bufs.dpb_y[index_hw_bufs].
align_virtual_addr,
encoder->hw_bufs.dpb_y[index_hw_bufs].
align_physical_addr,
encoder->hw_bufs.dpb_c[index_hw_bufs].
align_virtual_addr,
encoder->hw_bufs.dpb_c[index_hw_bufs].
align_physical_addr);
vcd_status = VCD_S_SUCCESS;
}
}
}
break;
case VCD_I_FREE_RECON_BUFFERS:
{
memset(&encoder->hw_bufs.dpb_y, 0,
sizeof(struct ddl_buf_addr) * 4);
memset(&encoder->hw_bufs.dpb_c, 0,
sizeof(struct ddl_buf_addr) * 4);
vcd_status = VCD_S_SUCCESS;
break;
}
case VCD_I_METADATA_ENABLE:
case VCD_I_METADATA_HEADER:
DDL_MSG_LOW("Meta Data Interface is Requested");
if (!res_trk_check_for_sec_session()) {
if (!encoder->slice_delivery_info.enable) {
vcd_status = ddl_set_metadata_params(ddl,
property_hdr, property_value);
} else {
DDL_MSG_ERROR("Ignoring meta data settting in "
"slice mode: %s\n", __func__);
vcd_status = VCD_S_SUCCESS;
}
} else {
DDL_MSG_ERROR("Meta Data Interface is not "
"supported in secure session");
vcd_status = VCD_ERR_ILLEGAL_OP;
}
break;
case VCD_I_META_BUFFER_MODE:
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_ENABLE_SPS_PPS_FOR_IDR:
{
struct vcd_property_sps_pps_for_idr_enable *sps_pps =
(struct vcd_property_sps_pps_for_idr_enable *) property_value;
if ((sizeof(struct vcd_property_sps_pps_for_idr_enable)) ==
property_hdr->sz) {
DDL_MSG_LOW("SPS PPS generation for IDR Encode "
"is Requested");
encoder->sps_pps.sps_pps_for_idr_enable_flag =
sps_pps->sps_pps_for_idr_enable_flag;
vcd_status = VCD_S_SUCCESS;
}
break;
}
case VCD_I_SLICE_DELIVERY_MODE:
{
size_t output_buf_size;
u32 num_mb, num_slices;
struct vcd_property_hdr slice_property_hdr;
struct vcd_property_meta_data_enable slice_meta_data;
DDL_MSG_HIGH("Set property VCD_I_SLICE_DELIVERY_MODE\n");
if (sizeof(u32) == property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_H264 &&
encoder->multi_slice.m_slice_sel
== VCD_MSLICE_BY_MB_COUNT &&
DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) {
encoder->slice_delivery_info.enable
= *(u32 *)property_value;
DDL_MSG_HIGH("set encoder->slice_delivery_mode = %u\n",
encoder->slice_delivery_info.enable);
output_buf_size =
encoder->client_output_buf_req.sz;
num_mb = DDL_NO_OF_MB(encoder->frame_size.width,
encoder->frame_size.height);
num_slices = num_mb/
encoder->multi_slice.m_slice_size;
num_slices = ((num_mb - num_slices *
encoder->multi_slice.m_slice_size) > 0)
? (num_slices + 1) : num_slices;
encoder->slice_delivery_info.num_slices =
num_slices;
if (num_slices <= DDL_MAX_NUM_BFRS_FOR_SLICE_BATCH) {
DDL_MSG_HIGH("%s: currently slice info "
"metadata is not supported when slice "
"delivery mode is enabled. hence "
"disabling slice info metadata.\n",
__func__);
slice_property_hdr.prop_id =
VCD_I_METADATA_ENABLE;
slice_property_hdr.sz =
sizeof(struct \
vcd_property_meta_data_enable);
ddl_get_metadata_params(ddl,
&slice_property_hdr,
&slice_meta_data);
slice_meta_data.meta_data_enable_flag
&= ~VCD_METADATA_ENC_SLICE;
ddl_set_metadata_params(ddl,
&slice_property_hdr,
&slice_meta_data);
encoder->client_output_buf_req.min_count =
((DDL_ENC_SLICE_BATCH_FACTOR * num_slices + 2)
> DDL_MAX_BUFFER_COUNT)
? DDL_MAX_BUFFER_COUNT :
(DDL_ENC_SLICE_BATCH_FACTOR * num_slices + 2);
output_buf_size =
encoder->client_output_buf_req.sz/num_slices;
encoder->client_output_buf_req.sz =
DDL_ALIGN(output_buf_size, DDL_KILO_BYTE(4));
if (encoder->client_output_buf_req. \
actual_count < encoder-> \
client_output_buf_req.min_count) {
encoder->client_output_buf_req. \
actual_count = encoder-> \
client_output_buf_req.min_count;
}
encoder->output_buf_req =
encoder->client_output_buf_req;
DDL_MSG_HIGH("%s num_mb = %u num_slices = %u" \
" min_count = %u act_count = %u" \
" aligned size = %u\n",
__func__, num_mb, num_slices,
encoder->client_output_buf_req.min_count,
encoder->client_output_buf_req.actual_count,
encoder->client_output_buf_req.sz);
vcd_status = VCD_S_SUCCESS;
}
}
break;
}
case VCD_I_LTR_MODE:
if (sizeof(struct vcd_property_ltrmode_type) ==
property_hdr->sz && encoder->codec.codec ==
VCD_CODEC_H264) {
struct vcd_property_ltrmode_type *ltrmode =
(struct vcd_property_ltrmode_type *)
property_value;
encoder->ltr_control.ltrmode.ltr_mode =
ltrmode->ltr_mode;
DDL_MSG_HIGH("%s: set LTR mode = %u", __func__,
(u32)encoder->ltr_control.ltrmode.ltr_mode);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LTR_COUNT:
if (sizeof(struct vcd_property_ltrcount_type) ==
property_hdr->sz && encoder->codec.codec ==
VCD_CODEC_H264) {
struct vcd_property_ltrcount_type *ltrcount =
(struct vcd_property_ltrcount_type *)
property_value;
if (ltrcount->ltr_count > DDL_MAX_NUM_LTR_FRAMES) {
DDL_MSG_ERROR("%s: set LTR count failed. "\
"LTR count %u beyond maximum of %u",
__func__, ltrcount->ltr_count,
(u32)DDL_MAX_NUM_LTR_FRAMES);
} else {
encoder->ltr_control.ltr_count =
ltrcount->ltr_count;
DDL_MSG_HIGH("%s: set LTR count = %u", __func__,
encoder->ltr_control.ltr_count);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_REQ_PERF_LEVEL:
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_ENABLE_DELIMITER_FLAG:
{
struct vcd_property_avc_delimiter_enable *delimiter_enable =
(struct vcd_property_avc_delimiter_enable *)
property_value;
if (sizeof(struct vcd_property_avc_delimiter_enable) ==
property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_H264) {
encoder->avc_delimiter_enable =
delimiter_enable->avc_delimiter_enable_flag;
vcd_status = VCD_S_SUCCESS;
}
break;
}
case VCD_I_ENABLE_VUI_TIMING_INFO:
{
struct vcd_property_vui_timing_info_enable *vui_timing_enable =
(struct vcd_property_vui_timing_info_enable *)
property_value;
if (sizeof(struct vcd_property_vui_timing_info_enable) ==
property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_H264) {
encoder->vui_timinginfo_enable =
vui_timing_enable->vui_timing_info;
vcd_status = VCD_S_SUCCESS;
}
break;
}
case VCD_I_H263_PLUSPTYPE:
{
struct vcd_property_plusptype *plusptype =
(struct vcd_property_plusptype *)property_value;
if ((sizeof(struct vcd_property_plusptype) ==
property_hdr->sz) && encoder->codec.codec ==
VCD_CODEC_H263) {
encoder->plusptype_enable = plusptype->plusptype_enable;
DDL_MSG_LOW("\nencoder->plusptype_enable = %u",
encoder->plusptype_enable);
vcd_status = VCD_S_SUCCESS;
}
break;
}
default:
DDL_MSG_ERROR("%s: unknown prop_id = 0x%x", __func__,
property_hdr->prop_id);
vcd_status = VCD_ERR_ILLEGAL_OP;
break;
}
return vcd_status;
}
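/*
 * Decoder property getter: copies the currently configured decoder state
 * (frame size, profile/level, buffer requirements, output order, metadata
 * parameters, ...) into the caller-supplied structure after a size check.
 */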
static u32 ddl_get_dec_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_decoder_data *decoder = &ddl->codec_data.decoder;
struct vcd_property_frame_size *fz_size;
u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
DDL_MSG_HIGH("property_hdr->prop_id:%x\n", property_hdr->prop_id);
switch (property_hdr->prop_id) {
case VCD_I_FRAME_SIZE:
if (sizeof(struct vcd_property_frame_size) ==
property_hdr->sz) {
ddl_calculate_stride(&decoder->client_frame_size,
!decoder->progressive_only);
fz_size =
&decoder->client_frame_size;
fz_size->stride =
DDL_TILE_ALIGN(fz_size->width,
DDL_TILE_ALIGN_WIDTH);
fz_size->scan_lines =
DDL_TILE_ALIGN(fz_size->height,
DDL_TILE_ALIGN_HEIGHT);
*(struct vcd_property_frame_size *)
property_value =
decoder->client_frame_size;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_PROFILE:
if (sizeof(struct vcd_property_profile) ==
property_hdr->sz) {
*(struct vcd_property_profile *)property_value =
decoder->profile;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LEVEL:
if (sizeof(struct vcd_property_level) ==
property_hdr->sz) {
*(struct vcd_property_level *)property_value =
decoder->level;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_PROGRESSIVE_ONLY:
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value =
decoder->progressive_only;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_INPUT_BUF_REQ:
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz) {
*(struct vcd_buffer_requirement *)
property_value =
decoder->client_input_buf_req;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_OUTPUT_BUF_REQ:
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz) {
*(struct vcd_buffer_requirement *)
property_value = decoder->client_output_buf_req;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_CODEC:
if (sizeof(struct vcd_property_codec) ==
property_hdr->sz) {
*(struct vcd_property_codec *) property_value =
decoder->codec;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_BUFFER_FORMAT:
if (sizeof(struct vcd_property_buffer_format) ==
property_hdr->sz) {
*(struct vcd_property_buffer_format *)
property_value = decoder->buf_format;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_POST_FILTER:
if (sizeof(struct vcd_property_post_filter) ==
property_hdr->sz) {
*(struct vcd_property_post_filter *)
property_value =
decoder->post_filter;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_SEQHDR_ALIGN_BYTES:
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value =
DDL_LINEAR_BUFFER_ALIGN_BYTES;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_FRAME_PROC_UNITS:
if (sizeof(u32) == property_hdr->sz) {
if (!decoder->progressive_only &&
(decoder->client_frame_size.width *
decoder->client_frame_size.height) <=
DDL_FRAME_VGA_SIZE) {
*(u32 *) property_value = DDL_NO_OF_MB(
DDL_FRAME_720P_WIDTH,
DDL_FRAME_720P_HEIGHT);
} else {
*(u32 *) property_value = DDL_NO_OF_MB(
decoder->client_frame_size.width,
decoder->client_frame_size.height);
}
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_DPB_RETRIEVE:
if (sizeof(struct ddl_frame_data_tag) ==
property_hdr->sz) {
vcd_status = ddl_decoder_dpb_transact(decoder,
(struct ddl_frame_data_tag *)
property_value, DDL_DPB_OP_RETRIEVE);
}
break;
case VCD_I_GET_H264_MV_SIZE:
if (property_hdr->sz == sizeof(struct
vcd_property_buffer_size)) {
struct vcd_property_buffer_size *mv_size =
(struct vcd_property_buffer_size *) property_value;
mv_size->size = ddl_get_yuv_buf_size(mv_size->width,
mv_size->height, DDL_YUV_BUF_TYPE_TILE);
mv_size->alignment = DDL_TILE_BUFFER_ALIGN_BYTES;
DDL_MSG_LOW("w: %d, h: %d, S: %d, "
"A: %d", mv_size->width,
mv_size->height, mv_size->size,
mv_size->alignment);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_OUTPUT_ORDER:
{
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value = decoder->output_order;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_METADATA_ENABLE:
case VCD_I_METADATA_HEADER:
DDL_MSG_ERROR("Meta Data Interface is Requested");
vcd_status = ddl_get_metadata_params(ddl, property_hdr,
property_value);
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_CONT_ON_RECONFIG:
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value = decoder->cont_mode;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_DISABLE_DMX_SUPPORT:
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value = res_trk_get_disable_dmx();
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_DISABLE_DMX:
if (sizeof(u32) == property_hdr->sz) {
*(u32 *)property_value = decoder->dmx_disable;
vcd_status = VCD_S_SUCCESS;
}
break;
default:
vcd_status = VCD_ERR_ILLEGAL_OP;
break;
}
return vcd_status;
}
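/*
 * Encoder property getter: mirrors ddl_set_enc_property() and returns the
 * currently configured encoder parameters, including the generated
 * sequence header and the LTR capability/state.
 */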
static u32 ddl_get_enc_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
switch (property_hdr->prop_id) {
case VCD_I_CODEC:
if (sizeof(struct vcd_property_codec) ==
property_hdr->sz) {
*(struct vcd_property_codec *) property_value =
encoder->codec;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_FRAME_SIZE:
if (sizeof(struct vcd_property_frame_size) ==
property_hdr->sz) {
*(struct vcd_property_frame_size *)
property_value = encoder->frame_size;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_FRAME_RATE:
if (sizeof(struct vcd_property_frame_rate) ==
property_hdr->sz) {
*(struct vcd_property_frame_rate *)
property_value = encoder->frame_rate;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_TARGET_BITRATE:
if (sizeof(struct vcd_property_target_bitrate) ==
property_hdr->sz) {
*(struct vcd_property_target_bitrate *)
property_value = encoder->target_bit_rate;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_RATE_CONTROL:
if (sizeof(struct vcd_property_rate_control) ==
property_hdr->sz) {
*(struct vcd_property_rate_control *)
property_value = encoder->rc;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_PROFILE:
if (sizeof(struct vcd_property_profile) ==
property_hdr->sz) {
*(struct vcd_property_profile *) property_value =
encoder->profile;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LEVEL:
if (sizeof(struct vcd_property_level) ==
property_hdr->sz) {
*(struct vcd_property_level *) property_value =
encoder->level;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_MULTI_SLICE:
if (sizeof(struct vcd_property_multi_slice) ==
property_hdr->sz) {
*(struct vcd_property_multi_slice *)
property_value = encoder->multi_slice;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_SEQ_HEADER:
{
struct vcd_sequence_hdr *seq_hdr =
(struct vcd_sequence_hdr *) property_value;
if (!encoder->seq_header_length) {
seq_hdr->sequence_header_len =
encoder->seq_header_length;
vcd_status = VCD_ERR_NO_SEQ_HDR;
} else if (sizeof(struct vcd_sequence_hdr) ==
property_hdr->sz &&
encoder->seq_header_length <=
seq_hdr->sequence_header_len) {
memcpy(seq_hdr->sequence_header,
encoder->seq_header.align_virtual_addr,
encoder->seq_header_length);
seq_hdr->sequence_header_len =
encoder->seq_header_length;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case DDL_I_SEQHDR_PRESENT:
if (sizeof(u32) == property_hdr->sz) {
if ((encoder->codec.codec ==
VCD_CODEC_MPEG4 &&
!encoder->short_header.short_header) ||
encoder->codec.codec == VCD_CODEC_H264)
*(u32 *) property_value = 0x1;
else
*(u32 *) property_value = 0x0;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_VOP_TIMING:
if (sizeof(struct vcd_property_vop_timing) ==
property_hdr->sz) {
*(struct vcd_property_vop_timing *)
property_value = encoder->vop_timing;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_SHORT_HEADER:
if (sizeof(struct vcd_property_short_header) ==
property_hdr->sz) {
if (encoder->codec.codec == VCD_CODEC_MPEG4) {
*(struct vcd_property_short_header *)
property_value =
encoder->short_header;
vcd_status = VCD_S_SUCCESS;
} else
vcd_status = VCD_ERR_ILLEGAL_OP;
}
break;
case VCD_I_ENTROPY_CTRL:
if (sizeof(struct vcd_property_entropy_control) ==
property_hdr->sz) {
if (encoder->codec.codec == VCD_CODEC_H264) {
*(struct vcd_property_entropy_control *)
property_value =
encoder->entropy_control;
vcd_status = VCD_S_SUCCESS;
} else
vcd_status = VCD_ERR_ILLEGAL_OP;
}
break;
case VCD_I_DEBLOCKING:
if (sizeof(struct vcd_property_db_config) ==
property_hdr->sz) {
if (encoder->codec.codec == VCD_CODEC_H264) {
*(struct vcd_property_db_config *)
property_value =
encoder->db_control;
vcd_status = VCD_S_SUCCESS;
} else
vcd_status = VCD_ERR_ILLEGAL_OP;
}
break;
case VCD_I_INTRA_PERIOD:
if (sizeof(struct vcd_property_i_period) ==
property_hdr->sz) {
*(struct vcd_property_i_period *)
property_value = encoder->i_period;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_QP_RANGE:
if (sizeof(struct vcd_property_qp_range) ==
property_hdr->sz) {
*(struct vcd_property_qp_range *)
property_value = encoder->qp_range;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_SESSION_QP:
if (sizeof(struct vcd_property_session_qp) ==
property_hdr->sz) {
*(struct vcd_property_session_qp *)
property_value = encoder->session_qp;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_RC_LEVEL_CONFIG:
if (sizeof(struct vcd_property_rc_level) ==
property_hdr->sz) {
*(struct vcd_property_rc_level *)
property_value = encoder->rc_level;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_FRAME_LEVEL_RC:
if (sizeof(struct vcd_property_frame_level_rc_params) ==
property_hdr->sz) {
*(struct vcd_property_frame_level_rc_params *)
property_value = encoder->frame_level_rc;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_ADAPTIVE_RC:
if (sizeof(struct vcd_property_adaptive_rc_params) ==
property_hdr->sz) {
*(struct vcd_property_adaptive_rc_params *)
property_value = encoder->adaptive_rc;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_INTRA_REFRESH:
if (sizeof(struct vcd_property_intra_refresh_mb_number) ==
property_hdr->sz) {
*(struct vcd_property_intra_refresh_mb_number *)
property_value = encoder->intra_refresh;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_INPUT_BUF_REQ:
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz) {
*(struct vcd_buffer_requirement *)
property_value = encoder->client_input_buf_req;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_OUTPUT_BUF_REQ:
if (sizeof(struct vcd_buffer_requirement) ==
property_hdr->sz) {
*(struct vcd_buffer_requirement *)
property_value = encoder->client_output_buf_req;
DDL_MSG_LOW("%s encoder->client_output_buf_req = %d\n",
__func__,
encoder->client_output_buf_req.sz);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_BUFFER_FORMAT:
if (sizeof(struct vcd_property_buffer_format) ==
property_hdr->sz) {
*(struct vcd_property_buffer_format *)
property_value = encoder->buf_format;
vcd_status = VCD_S_SUCCESS;
}
break;
case DDL_I_FRAME_PROC_UNITS:
if (sizeof(u32) == property_hdr->sz &&
encoder->frame_size.width &&
encoder->frame_size.height) {
*(u32 *)property_value = DDL_NO_OF_MB(
encoder->frame_size.width,
encoder->frame_size.height);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_HEADER_EXTENSION:
if (sizeof(u32) == property_hdr->sz &&
encoder->codec.codec == VCD_CODEC_MPEG4) {
*(u32 *) property_value =
encoder->hdr_ext_control;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_GET_RECON_BUFFER_SIZE:
{
u32 ysize, uvsize;
if (property_hdr->sz == sizeof(struct
vcd_property_buffer_size)) {
struct vcd_property_buffer_size *recon_buff_size =
(struct vcd_property_buffer_size *) property_value;
ysize = ddl_get_yuv_buf_size(recon_buff_size->width,
recon_buff_size->height, DDL_YUV_BUF_TYPE_TILE);
uvsize = ddl_get_yuv_buf_size(recon_buff_size->width,
recon_buff_size->height/2,
DDL_YUV_BUF_TYPE_TILE);
recon_buff_size->size = ysize + uvsize;
recon_buff_size->alignment =
DDL_TILE_BUFFER_ALIGN_BYTES;
DDL_MSG_LOW("w: %d, h: %d, S: %d, A: %d",
recon_buff_size->width, recon_buff_size->height,
recon_buff_size->size, recon_buff_size->alignment);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_METADATA_ENABLE:
case VCD_I_METADATA_HEADER:
DDL_MSG_HIGH("Meta Data Interface is Requested");
vcd_status = ddl_get_metadata_params(ddl, property_hdr,
property_value);
vcd_status = VCD_S_SUCCESS;
break;
case VCD_I_ENABLE_SPS_PPS_FOR_IDR:
if (sizeof(struct vcd_property_sps_pps_for_idr_enable) ==
property_hdr->sz) {
*(struct vcd_property_sps_pps_for_idr_enable *)
property_value = encoder->sps_pps;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_SLICE_DELIVERY_MODE:
if (sizeof(struct vcd_property_slice_delivery_info) ==
property_hdr->sz) {
*(struct vcd_property_slice_delivery_info *)
property_value = encoder->slice_delivery_info;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_ENABLE_DELIMITER_FLAG:
if (sizeof(struct vcd_property_avc_delimiter_enable) ==
property_hdr->sz) {
((struct vcd_property_avc_delimiter_enable *)
property_value)->avc_delimiter_enable_flag =
encoder->avc_delimiter_enable;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_ENABLE_VUI_TIMING_INFO:
if (sizeof(struct vcd_property_vui_timing_info_enable) ==
property_hdr->sz) {
((struct vcd_property_vui_timing_info_enable *)
property_value)->vui_timing_info =
encoder->vui_timinginfo_enable;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_CAPABILITY_LTR_COUNT:
if (sizeof(struct vcd_property_range_type) ==
property_hdr->sz) {
struct vcd_property_range_type capability_ltr_range;
capability_ltr_range.max = DDL_MAX_NUM_LTR_FRAMES;
capability_ltr_range.min = 1;
capability_ltr_range.step_size = 1;
*(struct vcd_property_range_type *)property_value =
capability_ltr_range;
DDL_MSG_HIGH("%s: capability_ltr_count = %u",
__func__, ((struct vcd_property_range_type *)
property_value)->max);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LTR_MODE:
if (sizeof(struct vcd_property_ltrmode_type) ==
property_hdr->sz) {
((struct vcd_property_ltrmode_type *)
property_value)->ltr_mode =
encoder->ltr_control.ltrmode.ltr_mode;
DDL_MSG_HIGH("%s: ltr_mode = %u", __func__,
(u32)(((struct vcd_property_ltrmode_type *)
property_value)->ltr_mode));
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LTR_COUNT:
if (sizeof(struct vcd_property_ltrcount_type) ==
property_hdr->sz) {
struct vcd_property_ltrcount_type ltr_count;
ltr_count.ltr_count =
encoder->ltr_control.ltr_count;
*(struct vcd_property_ltrcount_type *)property_value =
ltr_count;
DDL_MSG_HIGH("%s: ltr_count = %u", __func__,
((struct vcd_property_ltrcount_type *)
property_value)->ltr_count);
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_LTR_PERIOD:
if (sizeof(struct vcd_property_ltrperiod_type) ==
property_hdr->sz) {
struct vcd_property_ltrperiod_type ltr_period;
if (!encoder->ltr_control.ltr_period)
ltr_period.ltr_period = 0;
else
ltr_period.ltr_period =
encoder->ltr_control.ltr_period - 1;
*(struct vcd_property_ltrperiod_type *)property_value =
ltr_period;
DDL_MSG_HIGH("%s: ltr_period = %u", __func__,
((struct vcd_property_ltrperiod_type *)
property_value)->ltr_period);
vcd_status = VCD_S_SUCCESS;
}
break;
default:
DDL_MSG_ERROR("%s: unknown prop_id = 0x%x", __func__,
property_hdr->prop_id);
vcd_status = VCD_ERR_ILLEGAL_OP;
break;
}
return vcd_status;
}
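/*
 * Handles encoder properties that may change while encoding is in
 * progress. Successful changes made in the WAIT_FOR_FRAME or
 * WAIT_FOR_FRAME_DONE states are accumulated in
 * encoder->dynamic_prop_change.
 */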
static u32 ddl_set_enc_dynamic_property(struct ddl_client_context *ddl,
struct vcd_property_hdr *property_hdr, void *property_value)
{
struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
u32 vcd_status = VCD_ERR_ILLEGAL_PARM;
u32 dynamic_prop_change = 0x0;
switch (property_hdr->prop_id) {
case VCD_I_REQ_IFRAME:
if (sizeof(struct vcd_property_req_i_frame) ==
property_hdr->sz) {
dynamic_prop_change |= DDL_ENC_REQ_IFRAME;
vcd_status = VCD_S_SUCCESS;
}
break;
case VCD_I_TARGET_BITRATE:
{
struct vcd_property_target_bitrate *bitrate =
(struct vcd_property_target_bitrate *)property_value;
if (sizeof(struct vcd_property_target_bitrate) ==
property_hdr->sz && bitrate->target_bitrate &&
bitrate->target_bitrate <= DDL_MAX_BIT_RATE) {
encoder->target_bit_rate = *bitrate;
dynamic_prop_change = DDL_ENC_CHANGE_BITRATE;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_INTRA_PERIOD:
{
struct vcd_property_i_period *i_period =
(struct vcd_property_i_period *)property_value;
if (sizeof(struct vcd_property_i_period) ==
property_hdr->sz) {
encoder->i_period = *i_period;
dynamic_prop_change = DDL_ENC_CHANGE_IPERIOD;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_FRAME_RATE:
{
struct vcd_property_frame_rate *frame_rate =
(struct vcd_property_frame_rate *)
property_value;
if (sizeof(struct vcd_property_frame_rate) ==
property_hdr->sz &&
frame_rate->fps_denominator &&
frame_rate->fps_numerator &&
frame_rate->fps_denominator <=
frame_rate->fps_numerator) {
encoder->frame_rate = *frame_rate;
dynamic_prop_change = DDL_ENC_CHANGE_FRAMERATE;
if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
(encoder->codec.codec != VCD_CODEC_MPEG4 ||
encoder->short_header.short_header)) {
ddl_set_default_enc_vop_timing(encoder);
}
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_INTRA_REFRESH:
{
struct vcd_property_intra_refresh_mb_number
*intra_refresh_mb_num =
(struct vcd_property_intra_refresh_mb_number *)
property_value;
u32 frame_mb_num = DDL_NO_OF_MB(encoder->frame_size.width,
encoder->frame_size.height);
if ((sizeof(struct vcd_property_intra_refresh_mb_number) ==
property_hdr->sz) &&
(intra_refresh_mb_num->cir_mb_number <= frame_mb_num)) {
encoder->intra_refresh = *intra_refresh_mb_num;
dynamic_prop_change = DDL_ENC_CHANGE_CIR;
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_LTR_PERIOD:
{
if (sizeof(struct vcd_property_ltrperiod_type) ==
property_hdr->sz) {
struct vcd_property_ltrperiod_type *ltrperiod =
(struct vcd_property_ltrperiod_type *)
property_value;
encoder->ltr_control.ltr_period =
(ltrperiod->ltr_period == 0xFFFFFFFF) ?
0xFFFFFFFF : (ltrperiod->ltr_period + 1);
DDL_MSG_HIGH("%s: set ltr_period = %u", __func__,
encoder->ltr_control.ltr_period);
vcd_status = VCD_S_SUCCESS;
}
}
break;
case VCD_I_LTR_USE:
{
if (sizeof(struct vcd_property_ltruse_type) ==
property_hdr->sz) {
struct vcd_property_ltruse_type *ltruse =
(struct vcd_property_ltruse_type *)
property_value;
if (ltruse->ltr_id >= DDL_LTR_FRAME_START_ID) {
struct ddl_ltr_encoding_type *ltr_ctrl =
&encoder->ltr_control;
s32 idx;
idx = ddl_find_ltr_from_list(ltr_ctrl,
ltruse->ltr_id);
if (idx < 0) {
ltr_ctrl->callback_reqd = true;
ltr_ctrl->failed_use_cmd.ltr_id =
ltruse->ltr_id;
ltr_ctrl->failed_use_cmd.ltr_frames =
ltruse->ltr_frames;
DDL_MSG_ERROR("%s: index (%d) "\
"not found. Callback requested. "\
"ltr_id = %u, ltr_frames = %u",
__func__, idx, ltruse->ltr_id,
ltruse->ltr_frames);
} else {
ddl_use_ltr_from_list(ltr_ctrl, idx);
ltr_ctrl->ltr_use_frames =
ltruse->ltr_frames;
if (ltr_ctrl->using == false)
ltr_ctrl->\
out_frame_cnt_to_use_this_ltr =
ltruse->ltr_frames;
else
ltr_ctrl->\
pending_chg_ltr_useframes =
true;
ltr_ctrl->first_ltr_use_arvd = true;
ltr_ctrl->use_ltr_reqd = true;
DDL_MSG_HIGH("%s: index (%d) found. "\
"num frames to use this ltr_id (%u) "\
"is %u", __func__, idx,
ltruse->ltr_id, ltruse->ltr_frames);
}
dynamic_prop_change = DDL_ENC_LTR_USE_FRAME;
vcd_status = VCD_S_SUCCESS;
} else {
DDL_MSG_ERROR("%s: LTRUse ID %d failed. "\
"LTR ID starts from %d", __func__,
ltruse->ltr_id,
(u32)DDL_LTR_FRAME_START_ID);
vcd_status = VCD_ERR_ILLEGAL_OP;
}
}
}
break;
default:
vcd_status = VCD_ERR_ILLEGAL_OP;
break;
}
if (!vcd_status && (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)
|| DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)))
encoder->dynamic_prop_change |= dynamic_prop_change;
return vcd_status;
}
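/*
 * Installs decoder defaults: 4x2 tiled output, the test default frame
 * dimensions, display-order output, post filter disabled, and the default
 * metadata flags and buffer requirements.
 */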
void ddl_set_default_dec_property(struct ddl_client_context *ddl)
{
struct ddl_decoder_data *decoder =
&(ddl->codec_data.decoder);
	/* Post filter is disabled by default for all decoder codecs */
	decoder->post_filter.post_filter = false;
decoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_TILE_4x2;
decoder->client_frame_size.height = VCD_DDL_TEST_DEFAULT_HEIGHT;
decoder->client_frame_size.width = VCD_DDL_TEST_DEFAULT_WIDTH;
decoder->client_frame_size.stride = VCD_DDL_TEST_DEFAULT_WIDTH;
decoder->client_frame_size.scan_lines = VCD_DDL_TEST_DEFAULT_HEIGHT;
decoder->progressive_only = 1;
decoder->idr_only_decoding = false;
decoder->output_order = VCD_DEC_ORDER_DISPLAY;
decoder->field_needed_for_prev_ip = 0;
decoder->cont_mode = 0;
decoder->reconfig_detected = false;
decoder->dmx_disable = false;
ddl_set_default_metadata_flag(ddl);
ddl_set_default_decoder_buffer_req(decoder, true);
}
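/*
 * Installs encoder defaults for the currently selected codec: profile,
 * level, VBR-VFR rate control, intra period, VOP timing, entropy and
 * deblocking settings, buffer formats, LTR state and buffer requirements.
 */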
static void ddl_set_default_enc_property(struct ddl_client_context *ddl)
{
struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
ddl_set_default_enc_profile(encoder);
ddl_set_default_enc_level(encoder);
encoder->rc.rate_control = VCD_RATE_CONTROL_VBR_VFR;
ddl_set_default_enc_rc_params(encoder);
ddl_set_default_enc_intra_period(encoder);
encoder->intra_refresh.cir_mb_number = 0;
ddl_set_default_enc_vop_timing(encoder);
encoder->multi_slice.m_slice_sel = VCD_MSLICE_OFF;
encoder->multi_slice.m_slice_size = 0;
ddl->b_count = 0;
encoder->short_header.short_header = false;
encoder->entropy_control.entropy_sel = VCD_ENTROPY_SEL_CAVLC;
encoder->entropy_control.cabac_model = VCD_CABAC_MODEL_NUMBER_0;
encoder->db_control.db_config =
VCD_DB_ALL_BLOCKING_BOUNDARY;
encoder->db_control.slice_alpha_offset = 0;
encoder->db_control.slice_beta_offset = 0;
encoder->recon_buf_format.buffer_format =
VCD_BUFFER_FORMAT_TILE_1x1;
encoder->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12_16M2KA;
encoder->hdr_ext_control = 0;
encoder->mb_info_enable = false;
encoder->num_references_for_p_frame = DDL_MIN_NUM_REF_FOR_P_FRAME;
if (encoder->codec.codec == VCD_CODEC_MPEG4)
encoder->closed_gop = true;
encoder->intra_period_changed = false;
memset(&encoder->ltr_control, 0,
sizeof(struct ddl_ltr_encoding_type));
ddl_set_default_metadata_flag(ddl);
ddl_set_default_encoder_buffer_req(encoder);
encoder->slice_delivery_info.enable = 0;
encoder->slice_delivery_info.num_slices = 0;
encoder->slice_delivery_info.num_slices_enc = 0;
encoder->avc_delimiter_enable = 0;
encoder->vui_timinginfo_enable = 0;
}
static void ddl_set_default_enc_profile(struct ddl_encoder_data *encoder)
{
enum vcd_codec codec = encoder->codec.codec;
if (codec == VCD_CODEC_MPEG4)
encoder->profile.profile = VCD_PROFILE_MPEG4_SP;
else if (codec == VCD_CODEC_H264)
encoder->profile.profile = VCD_PROFILE_H264_BASELINE;
else
encoder->profile.profile = VCD_PROFILE_H263_BASELINE;
}
static void ddl_set_default_enc_level(struct ddl_encoder_data *encoder)
{
enum vcd_codec codec = encoder->codec.codec;
if (codec == VCD_CODEC_MPEG4)
encoder->level.level = VCD_LEVEL_MPEG4_1;
else if (codec == VCD_CODEC_H264)
encoder->level.level = VCD_LEVEL_H264_1;
else
encoder->level.level = VCD_LEVEL_H263_10;
}
static void ddl_set_default_enc_vop_timing(
struct ddl_encoder_data *encoder)
{
if (encoder->codec.codec == VCD_CODEC_MPEG4) {
encoder->vop_timing.vop_time_resolution =
(encoder->frame_rate.fps_numerator << 1) /
encoder->frame_rate.fps_denominator;
} else
encoder->vop_timing.vop_time_resolution =
DDL_FRAMERATE_SCALE(DDL_INITIAL_FRAME_RATE);
}
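/*
 * Default intra period: roughly two seconds' worth of P frames between I
 * frames for most rate-control modes, roughly half a second for CBR-CFR,
 * plus the default number of B frames.
 */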
static void ddl_set_default_enc_intra_period(
struct ddl_encoder_data *encoder)
{
switch (encoder->rc.rate_control) {
default:
case VCD_RATE_CONTROL_VBR_VFR:
case VCD_RATE_CONTROL_VBR_CFR:
case VCD_RATE_CONTROL_CBR_VFR:
case VCD_RATE_CONTROL_OFF:
encoder->i_period.p_frames =
((encoder->frame_rate.fps_numerator << 1) /
encoder->frame_rate.fps_denominator) - 1;
break;
case VCD_RATE_CONTROL_CBR_CFR:
encoder->i_period.p_frames =
((encoder->frame_rate.fps_numerator >> 1) /
encoder->frame_rate.fps_denominator) - 1;
break;
}
encoder->i_period.b_frames = DDL_DEFAULT_NUM_OF_B_FRAME;
}
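/*
* Worked example of the defaults above (the 30/1 fps figure is purely
* illustrative; the real value is whatever frame_rate holds when this
* runs): the VBR/default branch gives p_frames = ((30 << 1) / 1) - 1 = 59,
* i.e. roughly one I frame every two seconds, while the CBR_CFR branch
* gives ((30 >> 1) / 1) - 1 = 14, roughly one I frame every half second.
*/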
static void ddl_set_default_enc_rc_params(
struct ddl_encoder_data *encoder)
{
enum vcd_codec codec = encoder->codec.codec;
encoder->rc_level.frame_level_rc = true;
encoder->qp_range.min_qp = 0x1;
if (codec == VCD_CODEC_H264) {
encoder->qp_range.min_qp = 0x1;
encoder->qp_range.max_qp = 0x33;
encoder->session_qp.i_frame_qp = 0x14;
encoder->session_qp.p_frame_qp = 0x14;
encoder->session_qp.b_frame_qp = 0x14;
encoder->rc_level.mb_level_rc = true;
encoder->adaptive_rc.disable_activity_region_flag = true;
encoder->adaptive_rc.disable_dark_region_as_flag = true;
encoder->adaptive_rc.disable_smooth_region_as_flag = true;
encoder->adaptive_rc.disable_static_region_as_flag = true;
} else {
encoder->qp_range.max_qp = 0x1f;
encoder->qp_range.min_qp = 0x1;
encoder->session_qp.i_frame_qp = 0xd;
encoder->session_qp.p_frame_qp = 0xd;
encoder->session_qp.b_frame_qp = 0xd;
encoder->rc_level.frame_level_rc = true;
encoder->rc_level.mb_level_rc = false;
}
switch (encoder->rc.rate_control) {
case VCD_RATE_CONTROL_VBR_CFR:
encoder->r_cframe_skip = 0;
encoder->frame_level_rc.reaction_coeff = 0x1f4;
break;
case VCD_RATE_CONTROL_CBR_VFR:
encoder->r_cframe_skip = 1;
if (codec != VCD_CODEC_H264) {
encoder->session_qp.i_frame_qp = 0xf;
encoder->session_qp.p_frame_qp = 0xf;
encoder->session_qp.b_frame_qp = 0xf;
}
encoder->frame_level_rc.reaction_coeff = 0x14;
break;
case VCD_RATE_CONTROL_CBR_CFR:
encoder->r_cframe_skip = 0;
encoder->frame_level_rc.reaction_coeff = 0x6;
break;
case VCD_RATE_CONTROL_OFF:
encoder->r_cframe_skip = 0;
encoder->rc_level.frame_level_rc = false;
encoder->rc_level.mb_level_rc = false;
break;
case VCD_RATE_CONTROL_VBR_VFR:
default:
encoder->r_cframe_skip = 1;
encoder->frame_level_rc.reaction_coeff = 0x1f4;
break;
}
}
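/*
* Summary of the constants chosen above, with decimal equivalents for
* clarity: H.264 gets a QP range of 1..0x33 (1..51) and session QPs of
* 0x14 (20); the other codecs get 1..0x1f (1..31) with session QPs of
* 0xd (13), bumped to 0xf (15) for non-H.264 CBR_VFR sessions. The
* reaction coefficients are 0x1f4 (500) for VBR, 0x14 (20) for CBR_VFR
* and 0x6 for CBR_CFR.
*/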
void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data *encoder)
{
u32 y_cb_cr_size, y_size;
memset(&encoder->hw_bufs.dpb_y, 0, sizeof(struct ddl_buf_addr) * 4);
memset(&encoder->hw_bufs.dpb_c, 0, sizeof(struct ddl_buf_addr) * 4);
y_cb_cr_size = ddl_get_yuv_buffer_size(&encoder->frame_size,
&encoder->buf_format, false,
encoder->hdr.decoding, &y_size);
encoder->input_buf_size.size_yuv = y_cb_cr_size;
encoder->input_buf_size.size_y = y_size;
encoder->input_buf_size.size_c = y_cb_cr_size - y_size;
memset(&encoder->input_buf_req, 0,
sizeof(struct vcd_buffer_requirement));
encoder->input_buf_req.min_count = 3;
encoder->input_buf_req.actual_count =
encoder->input_buf_req.min_count;
encoder->input_buf_req.max_count = DDL_MAX_BUFFER_COUNT;
encoder->input_buf_req.sz = y_cb_cr_size;
if (encoder->buf_format.buffer_format ==
VCD_BUFFER_FORMAT_NV12_16M2KA)
encoder->input_buf_req.align =
DDL_LINEAR_BUFFER_ALIGN_BYTES;
else if (VCD_BUFFER_FORMAT_TILE_4x2 ==
encoder->buf_format.buffer_format)
encoder->input_buf_req.align = DDL_TILE_BUFFER_ALIGN_BYTES;
encoder->client_input_buf_req = encoder->input_buf_req;
memset(&encoder->output_buf_req, 0,
sizeof(struct vcd_buffer_requirement));
encoder->output_buf_req.min_count = encoder->i_period.b_frames + 2;
encoder->output_buf_req.actual_count =
encoder->output_buf_req.min_count + 3;
encoder->output_buf_req.max_count = DDL_MAX_BUFFER_COUNT;
encoder->output_buf_req.align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
if (y_cb_cr_size >= VCD_DDL_720P_YUV_BUF_SIZE)
y_cb_cr_size = y_cb_cr_size>>1;
encoder->output_buf_req.sz =
DDL_ALIGN(y_cb_cr_size, DDL_KILO_BYTE(4));
ddl_set_default_encoder_metadata_buffer_size(encoder);
encoder->client_output_buf_req = encoder->output_buf_req;
DDL_MSG_LOW("%s encoder->client_output_buf_req.sz = %d\n",
__func__, encoder->client_output_buf_req.sz);
}
u32 ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder,
u32 estimate)
{
struct vcd_property_frame_size *frame_size;
struct vcd_buffer_requirement *input_buf_req;
struct vcd_buffer_requirement *output_buf_req;
u32 min_dpb, y_cb_cr_size;
u32 frame_height_actual = 0;
if (!decoder->codec.codec)
return false;
if (estimate) {
if (!decoder->cont_mode)
min_dpb = ddl_decoder_min_num_dpb(decoder);
else
min_dpb = res_trk_get_min_dpb_count();
frame_size = &decoder->client_frame_size;
output_buf_req = &decoder->client_output_buf_req;
input_buf_req = &decoder->client_input_buf_req;
y_cb_cr_size = ddl_get_yuv_buffer_size(frame_size,
&decoder->buf_format,
(!decoder->progressive_only),
decoder->hdr.decoding, NULL);
} else {
frame_size = &decoder->frame_size;
output_buf_req = &decoder->actual_output_buf_req;
input_buf_req = &decoder->actual_input_buf_req;
min_dpb = decoder->min_dpb_num;
if ((decoder->buf_format.buffer_format ==
VCD_BUFFER_FORMAT_TILE_4x2) &&
(frame_size->height < MDP_MIN_TILE_HEIGHT)) {
frame_height_actual = frame_size->height;
frame_size->height = MDP_MIN_TILE_HEIGHT;
ddl_calculate_stride(frame_size,
!decoder->progressive_only);
y_cb_cr_size = ddl_get_yuv_buffer_size(
frame_size,
&decoder->buf_format,
(!decoder->progressive_only),
decoder->hdr.decoding, NULL);
decoder->y_cb_cr_size = y_cb_cr_size;
} else
y_cb_cr_size = decoder->y_cb_cr_size;
}
memset(output_buf_req, 0,
sizeof(struct vcd_buffer_requirement));
if (!decoder->idr_only_decoding && !decoder->cont_mode)
output_buf_req->actual_count = min_dpb + 4;
else
output_buf_req->actual_count = min_dpb;
output_buf_req->min_count = min_dpb;
output_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
output_buf_req->sz = y_cb_cr_size;
DDL_MSG_LOW("output_buf_req->sz : %d", output_buf_req->sz);
if (decoder->buf_format.buffer_format != VCD_BUFFER_FORMAT_NV12)
output_buf_req->align = DDL_TILE_BUFFER_ALIGN_BYTES;
else
output_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
ddl_set_default_decoder_metadata_buffer_size(decoder, frame_size,
output_buf_req);
decoder->min_output_buf_req = *output_buf_req;
memset(input_buf_req, 0,
sizeof(struct vcd_buffer_requirement));
input_buf_req->min_count = 1;
input_buf_req->actual_count = input_buf_req->min_count + 1;
input_buf_req->max_count = DDL_MAX_BUFFER_COUNT;
input_buf_req->sz = (1024 * 1024 * 2);
input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
decoder->min_input_buf_req = *input_buf_req;
if (frame_height_actual) {
frame_size->height = frame_height_actual;
ddl_calculate_stride(frame_size, !decoder->progressive_only);
}
return true;
}
u32 ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size,
struct vcd_property_buffer_format *buf_format,
u32 interlace, u32 decoding, u32 *pn_c_offset)
{
struct vcd_property_frame_size frame_sz = *frame_size;
u32 total_memory_size = 0, c_offset = 0;
ddl_calculate_stride(&frame_sz, interlace);
if (buf_format->buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
u32 component_mem_size, width_round_up;
u32 height_round_up, height_chroma = (frame_sz.scan_lines >> 1);
width_round_up =
DDL_ALIGN(frame_sz.stride, DDL_TILE_ALIGN_WIDTH);
height_round_up =
DDL_ALIGN(frame_sz.scan_lines,
DDL_TILE_ALIGN_HEIGHT);
component_mem_size = width_round_up * height_round_up;
component_mem_size = DDL_ALIGN(component_mem_size,
DDL_TILE_MULTIPLY_FACTOR);
c_offset = component_mem_size;
total_memory_size = ((component_mem_size +
DDL_TILE_BUF_ALIGN_GUARD_BYTES) &
DDL_TILE_BUF_ALIGN_MASK);
height_round_up = DDL_ALIGN(height_chroma,
DDL_TILE_ALIGN_HEIGHT);
component_mem_size = width_round_up * height_round_up;
component_mem_size = DDL_ALIGN(component_mem_size,
DDL_TILE_MULTIPLY_FACTOR);
total_memory_size += component_mem_size;
} else {
if (decoding)
total_memory_size = frame_sz.scan_lines *
frame_sz.stride;
else
total_memory_size = frame_sz.height * frame_sz.stride;
c_offset = DDL_ALIGN(total_memory_size,
DDL_LINEAR_MULTIPLY_FACTOR);
total_memory_size = c_offset + DDL_ALIGN(
total_memory_size >> 1, DDL_LINEAR_MULTIPLY_FACTOR);
}
if (pn_c_offset)
*pn_c_offset = c_offset;
return total_memory_size;
}
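/*
* In short: for the linear (NV12) case the luma plane is
* stride * scan_lines (stride * height when encoding), the chroma offset
* is that size rounded up to DDL_LINEAR_MULTIPLY_FACTOR, and the total is
* the chroma offset plus the similarly rounded half-size chroma plane.
* For TILE_4x2 both planes are rounded up to the tile width/height
* granularity and then to DDL_TILE_MULTIPLY_FACTOR, with the guard-byte
* alignment applied between the luma and chroma regions.
*/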
void ddl_calculate_stride(struct vcd_property_frame_size *frame_size,
u32 interlace)
{
frame_size->stride = DDL_ALIGN(frame_size->width,
DDL_LINEAR_ALIGN_WIDTH);
if (interlace)
frame_size->scan_lines = DDL_ALIGN(frame_size->height,
DDL_TILE_ALIGN_HEIGHT);
else
frame_size->scan_lines = DDL_ALIGN(frame_size->height,
DDL_LINEAR_ALIGN_HEIGHT);
}
static u32 ddl_valid_buffer_requirement(struct vcd_buffer_requirement
*original_buf_req, struct vcd_buffer_requirement *req_buf_req)
{
u32 status = false;
if (original_buf_req->max_count >= req_buf_req->actual_count &&
original_buf_req->min_count <=
req_buf_req->actual_count &&
!((original_buf_req->align - (u32)0x1) &
req_buf_req->align) &&
/*original_buf_req->align <= req_buf_req->align,*/
original_buf_req->sz <= req_buf_req->sz)
status = true;
else {
DDL_MSG_ERROR("ddl_valid_buf_req:Failed");
DDL_MSG_ERROR("codec_buf_req: min_cnt=%d, mx_cnt=%d, "
"align=%d, sz=%d\n", original_buf_req->min_count,
original_buf_req->max_count, original_buf_req->align,
original_buf_req->sz);
DDL_MSG_ERROR("client_buffs: actual_count=%d, align=%d, "
"sz=%d\n", req_buf_req->actual_count,
req_buf_req->align, req_buf_req->sz);
}
return status;
}
static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *decoder)
{
u32 min_dpb = 0;
if (decoder->idr_only_decoding) {
min_dpb = DDL_MIN_BUFFER_COUNT;
if (decoder->post_filter.post_filter)
min_dpb *= 2;
return min_dpb;
}
switch (decoder->codec.codec) {
case VCD_CODEC_H264:
{
u32 yuv_size_in_mb = DDL_MIN(DDL_NO_OF_MB(
decoder->client_frame_size.stride,
decoder->client_frame_size.scan_lines),
MAX_FRAME_SIZE_L4PT0_MBS);
min_dpb = DDL_MIN((MAX_DPB_SIZE_L4PT0_MBS /
yuv_size_in_mb), 16);
min_dpb += 2;
}
break;
case VCD_CODEC_H263:
min_dpb = 3;
break;
default:
case VCD_CODEC_MPEG1:
case VCD_CODEC_MPEG2:
case VCD_CODEC_MPEG4:
case VCD_CODEC_DIVX_3:
case VCD_CODEC_DIVX_4:
case VCD_CODEC_DIVX_5:
case VCD_CODEC_DIVX_6:
case VCD_CODEC_XVID:
case VCD_CODEC_VC1:
case VCD_CODEC_VC1_RCV:
min_dpb = 4;
if (decoder->post_filter.post_filter)
min_dpb *= 2;
break;
}
return min_dpb;
}
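/*
* For H.264 the count above mirrors the level 4.0 DPB limit:
* min(MAX_DPB_SIZE_L4PT0_MBS / frame size in macroblocks, 16) plus two
* extra buffers, with the frame size itself capped at
* MAX_FRAME_SIZE_L4PT0_MBS. The fixed counts for the other codecs
* (3 for H.263, otherwise 4, doubled when the post filter is enabled)
* are driver defaults rather than values derived from the bitstream.
*/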
static u32 ddl_set_dec_buffers(struct ddl_decoder_data *decoder,
struct ddl_property_dec_pic_buffers *dpb)
{
u32 vcd_status = VCD_S_SUCCESS, loopc;
for (loopc = 0; !vcd_status &&
loopc < dpb->no_of_dec_pic_buf; ++loopc) {
if ((!DDL_ADDR_IS_ALIGNED(dpb->dec_pic_buffers[loopc].
vcd_frm.physical,
decoder->client_output_buf_req.align)) ||
(dpb->dec_pic_buffers[loopc].vcd_frm.alloc_len <
decoder->client_output_buf_req.sz))
vcd_status = VCD_ERR_ILLEGAL_PARM;
}
if (vcd_status) {
DDL_MSG_ERROR("ddl_set_prop:"
"Dpb_align_fail_or_alloc_size_small");
return vcd_status;
}
if (decoder->dp_buf.no_of_dec_pic_buf) {
kfree(decoder->dp_buf.dec_pic_buffers);
decoder->dp_buf.no_of_dec_pic_buf = 0;
}
decoder->dp_buf.dec_pic_buffers =
kmalloc(dpb->no_of_dec_pic_buf *
sizeof(struct ddl_frame_data_tag), GFP_KERNEL);
if (!decoder->dp_buf.dec_pic_buffers) {
DDL_MSG_ERROR("ddl_dec_set_prop:Dpb_container_alloc_failed");
return VCD_ERR_ALLOC_FAIL;
}
decoder->dp_buf.no_of_dec_pic_buf = dpb->no_of_dec_pic_buf;
for (loopc = 0; loopc < dpb->no_of_dec_pic_buf; ++loopc)
decoder->dp_buf.dec_pic_buffers[loopc] =
dpb->dec_pic_buffers[loopc];
decoder->dpb_mask.client_mask = 0;
decoder->dpb_mask.hw_mask = 0;
decoder->dynamic_prop_change = 0;
return VCD_S_SUCCESS;
}
void ddl_set_initial_default_values(struct ddl_client_context *ddl)
{
if (ddl->decoding) {
ddl->codec_data.decoder.codec.codec = VCD_CODEC_MPEG4;
ddl_set_default_dec_property(ddl);
} else {
struct ddl_encoder_data *encoder =
&(ddl->codec_data.encoder);
encoder->codec.codec = VCD_CODEC_MPEG4;
encoder->target_bit_rate.target_bitrate = 64000;
encoder->frame_size.width = VCD_DDL_TEST_DEFAULT_WIDTH;
encoder->frame_size.height = VCD_DDL_TEST_DEFAULT_HEIGHT;
encoder->frame_size.scan_lines =
VCD_DDL_TEST_DEFAULT_HEIGHT;
encoder->frame_size.stride = VCD_DDL_TEST_DEFAULT_WIDTH;
encoder->frame_rate.fps_numerator = DDL_INITIAL_FRAME_RATE;
encoder->frame_rate.fps_denominator = 1;
ddl_set_default_enc_property(ddl);
encoder->sps_pps.sps_pps_for_idr_enable_flag = false;
encoder->plusptype_enable = 0;
}
}
| gpl-2.0 |
Kaisrlik/linux | drivers/w1/slaves/w1_ds2408.c | 2040 | 9819 | /*
* w1_ds2408.c - w1 family 29 (DS2408) driver
*
* Copyright (c) 2010 Jean-Francois Dagenais <dagenaisj@sonatest.com>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "../w1.h"
#include "../w1_int.h"
#include "../w1_family.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
#define W1_F29_RETRIES 3
#define W1_F29_REG_LOGIG_STATE 0x88 /* R */
#define W1_F29_REG_OUTPUT_LATCH_STATE 0x89 /* R */
#define W1_F29_REG_ACTIVITY_LATCH_STATE 0x8A /* R */
#define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */
#define W1_F29_REG_COND_SEARCH_POL_SELECT 0x8C /* RW */
#define W1_F29_REG_CONTROL_AND_STATUS 0x8D /* RW */
#define W1_F29_FUNC_READ_PIO_REGS 0xF0
#define W1_F29_FUNC_CHANN_ACCESS_READ 0xF5
#define W1_F29_FUNC_CHANN_ACCESS_WRITE 0x5A
/* also used to write the control/status reg (0x8D): */
#define W1_F29_FUNC_WRITE_COND_SEARCH_REG 0xCC
#define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3
#define W1_F29_SUCCESS_CONFIRM_BYTE 0xAA
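/*
* Mapping between the registers above and the 1-byte sysfs binary
* attributes defined below: state -> 0x88, output -> 0x89,
* activity -> 0x8A, cond_search_mask -> 0x8B,
* cond_search_polarity -> 0x8C, status_control -> 0x8D. Reads go through
* _read_reg() using READ_PIO_REGS (0xF0); writing "output" uses
* CHANN_ACCESS_WRITE (0x5A), writing "status_control" uses
* WRITE_COND_SEARCH_REG (0xCC), and writing "activity" issues
* RESET_ACTIVITY_LATCHES (0xC3).
*/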
static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf)
{
u8 wrbuf[3];
dev_dbg(&sl->dev,
"Reading with slave: %p, reg addr: %0#4x, buff addr: %p",
sl, (unsigned int)address, buf);
if (!buf)
return -EINVAL;
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl)) {
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS;
wrbuf[1] = address;
wrbuf[2] = 0;
w1_write_block(sl->master, wrbuf, 3);
*buf = w1_read_8(sl->master);
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex unlocked");
return 1;
}
static ssize_t state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off,
size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
}
static ssize_t output_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_OUTPUT_LATCH_STATE, buf);
}
static ssize_t activity_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
}
static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
}
static ssize_t cond_search_polarity_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
}
static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
return _read_reg(kobj_to_w1_slave(kobj),
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
static ssize_t output_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
u8 readBack;
unsigned int retries = W1_F29_RETRIES;
if (count != 1 || off != 0)
return -EFAULT;
dev_dbg(&sl->dev, "locking mutex for write_output");
mutex_lock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex locked");
if (w1_reset_select_slave(sl))
goto error;
while (retries--) {
w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
w1_buf[1] = *buf;
w1_buf[2] = ~(*buf);
w1_write_block(sl->master, w1_buf, 3);
readBack = w1_read_8(sl->master);
if (readBack != W1_F29_SUCCESS_CONFIRM_BYTE) {
if (w1_reset_resume_command(sl->master))
goto error;
/* try again, the slave is ready for a command */
continue;
}
#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
/* Here the master could read another byte, which
would be the PIO reg (the actual pin logic state).
Since in this driver we don't know which pins are
inputs and which are outputs, there is no value in
reading the state and comparing it with (*buf),
so end this command abruptly: */
if (w1_reset_resume_command(sl->master))
goto error;
/* go read back the output latches */
/* (the direct effect of the write above) */
w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
w1_buf[2] = 0;
w1_write_block(sl->master, w1_buf, 3);
/* read the result of the READ_PIO_REGS command */
if (w1_read_8(sl->master) == *buf)
#endif
{
/* success! */
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev,
"mutex unlocked, retries:%d", retries);
return 1;
}
}
error:
mutex_unlock(&sl->master->bus_mutex);
dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
return -EIO;
}
/**
* Writing to the activity file resets the activity latches.
*/
static ssize_t activity_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F29_RETRIES;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl))
goto error;
while (retries--) {
w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES);
if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) {
mutex_unlock(&sl->master->bus_mutex);
return 1;
}
if (w1_reset_resume_command(sl->master))
goto error;
}
error:
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
static ssize_t status_control_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[4];
unsigned int retries = W1_F29_RETRIES;
if (count != 1 || off != 0)
return -EFAULT;
mutex_lock(&sl->master->bus_mutex);
if (w1_reset_select_slave(sl))
goto error;
while (retries--) {
w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG;
w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
w1_buf[2] = 0;
w1_buf[3] = *buf;
w1_write_block(sl->master, w1_buf, 4);
if (w1_reset_resume_command(sl->master))
goto error;
w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
w1_buf[2] = 0;
w1_write_block(sl->master, w1_buf, 3);
if (w1_read_8(sl->master) == *buf) {
/* success! */
mutex_unlock(&sl->master->bus_mutex);
return 1;
}
}
error:
mutex_unlock(&sl->master->bus_mutex);
return -EIO;
}
/*
* This is a special sequence we must do to ensure the P0 output is not stuck
* in test mode. This is described in rev 2 of the ds2408's datasheet
* (http://datasheets.maximintegrated.com/en/ds/DS2408.pdf) under
* "APPLICATION INFORMATION/Power-up timing".
*/
static int w1_f29_disable_test_mode(struct w1_slave *sl)
{
int res;
u8 magic[10] = {0x96, };
u64 rn = le64_to_cpu(*((u64*)&sl->reg_num));
memcpy(&magic[1], &rn, 8);
magic[9] = 0x3C;
mutex_lock(&sl->master->bus_mutex);
res = w1_reset_bus(sl->master);
if (res)
goto out;
w1_write_block(sl->master, magic, ARRAY_SIZE(magic));
res = w1_reset_bus(sl->master);
out:
mutex_unlock(&sl->master->bus_mutex);
return res;
}
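/*
* The "magic" frame written above between the two bus resets is 10 bytes:
* 0x96, followed by the slave's 64-bit registration number (eight bytes
* taken from reg_num via le64_to_cpu), followed by 0x3C - the sequence
* the datasheet prescribes for leaving the power-up test mode on P0.
*/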
static BIN_ATTR_RO(state, 1);
static BIN_ATTR_RW(output, 1);
static BIN_ATTR_RW(activity, 1);
static BIN_ATTR_RO(cond_search_mask, 1);
static BIN_ATTR_RO(cond_search_polarity, 1);
static BIN_ATTR_RW(status_control, 1);
static struct bin_attribute *w1_f29_bin_attrs[] = {
&bin_attr_state,
&bin_attr_output,
&bin_attr_activity,
&bin_attr_cond_search_mask,
&bin_attr_cond_search_polarity,
&bin_attr_status_control,
NULL,
};
static const struct attribute_group w1_f29_group = {
.bin_attrs = w1_f29_bin_attrs,
};
static const struct attribute_group *w1_f29_groups[] = {
&w1_f29_group,
NULL,
};
static struct w1_family_ops w1_f29_fops = {
.add_slave = w1_f29_disable_test_mode,
.groups = w1_f29_groups,
};
static struct w1_family w1_family_29 = {
.fid = W1_FAMILY_DS2408,
.fops = &w1_f29_fops,
};
static int __init w1_f29_init(void)
{
return w1_register_family(&w1_family_29);
}
static void __exit w1_f29_exit(void)
{
w1_unregister_family(&w1_family_29);
}
module_init(w1_f29_init);
module_exit(w1_f29_exit);
| gpl-2.0 |
Frontier314/frontkernel_kitkat | drivers/tty/serial/omap-serial.c | 2040 | 36480 | /*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
*
* Copyright (C) 2010 Texas Instruments.
*
* Authors:
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Note: This driver is kept separate from the 8250 driver because we
* cannot overload the 8250 driver with OMAP platform-specific
* configuration for features like DMA; a separate driver makes it easier
* to implement DMA, hardware flow control and software flow control
* configuration as required for the OMAP platform.
*/
#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>
#include <plat/omap-serial.h>
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
static void serial_omap_rx_timeout(unsigned long uart_no);
static int serial_omap_start_rxdma(struct uart_omap_port *up);
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
offset <<= up->port.regshift;
return readw(up->port.membase + offset);
}
static inline void serial_out(struct uart_omap_port *up, int offset, int value)
{
offset <<= up->port.regshift;
writew(value, up->port.membase + offset);
}
static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
{
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
}
/*
* serial_omap_get_divisor - calculate divisor value
* @port: uart port info
* @baud: baudrate for which divisor needs to be calculated.
*
* We have written our own function to get the divisor so as to support
* 13x mode; a 3 Mbps baud rate uses a different divisor.
* Reference OMAP TRM Chapter 17:
* Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates
* referring to oversampling - divisor value:
* baud rates 460,800 to 3,686,400 all use divisor 13,
* except 3,000,000 which uses divisor 16.
*/
static unsigned int
serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int divisor;
if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
divisor = 13;
else
divisor = 16;
return port->uartclk/(baud * divisor);
}
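/*
* Illustrative arithmetic (the 48 MHz functional clock is an assumption,
* not something this function knows about): 115200 baud falls in the 16x
* oversampling range, giving 48000000 / (115200 * 16) = 26, while
* 3686400 baud uses the 13x range, giving 48000000 / (3686400 * 13) = 1.
*/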
static void serial_omap_stop_rxdma(struct uart_omap_port *up)
{
if (up->uart_dma.rx_dma_used) {
del_timer(&up->uart_dma.rx_timer);
omap_stop_dma(up->uart_dma.rx_dma_channel);
omap_free_dma(up->uart_dma.rx_dma_channel);
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_used = false;
}
}
static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
}
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
if (up->use_dma &&
up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
/*
* Check if dma is still active. If yes do nothing,
* return. Else stop dma
*/
if (omap_get_dma_active_status(up->uart_dma.tx_dma_channel))
return;
omap_stop_dma(up->uart_dma.tx_dma_channel);
omap_free_dma(up->uart_dma.tx_dma_channel);
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
if (up->use_dma)
serial_omap_stop_rxdma(up);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
static inline void receive_chars(struct uart_omap_port *up, int *status)
{
struct tty_struct *tty = up->port.state->port.tty;
unsigned int flag;
unsigned char ch = 0, lsr = *status;
int max_count = 256;
do {
if (likely(lsr & UART_LSR_DR))
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
/*
* For statistics only
*/
if (lsr & UART_LSR_BI) {
lsr &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
goto ignore_char;
} else if (lsr & UART_LSR_PE) {
up->port.icount.parity++;
} else if (lsr & UART_LSR_FE) {
up->port.icount.frame++;
}
if (lsr & UART_LSR_OE)
up->port.icount.overrun++;
/*
* Mask off conditions which should be ignored.
*/
lsr &= up->port.read_status_mask;
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
lsr |= up->lsr_break_flag;
}
#endif
if (lsr & UART_LSR_BI)
flag = TTY_BREAK;
else if (lsr & UART_LSR_PE)
flag = TTY_PARITY;
else if (lsr & UART_LSR_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch))
goto ignore_char;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
ignore_char:
lsr = serial_in(up, UART_LSR);
} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
spin_unlock(&up->port.lock);
tty_flip_buffer_push(tty);
spin_lock(&up->port.lock);
}
static void transmit_chars(struct uart_omap_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
serial_omap_stop_tx(&up->port);
return;
}
count = up->port.fifosize / 4;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (uart_circ_empty(xmit))
serial_omap_stop_tx(&up->port);
}
static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
{
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
struct circ_buf *xmit;
unsigned int start;
int ret = 0;
if (!up->use_dma) {
serial_omap_enable_ier_thri(up);
return;
}
if (up->uart_dma.tx_dma_used)
return;
xmit = &up->port.state->xmit;
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
"UART Tx DMA",
(void *)uart_tx_dma_callback, up,
&(up->uart_dma.tx_dma_channel));
if (ret < 0) {
serial_omap_enable_ier_thri(up);
return;
}
}
spin_lock(&(up->uart_dma.tx_lock));
up->uart_dma.tx_dma_used = true;
spin_unlock(&(up->uart_dma.tx_lock));
start = up->uart_dma.tx_buf_dma_phys +
(xmit->tail & (UART_XMIT_SIZE - 1));
up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
/*
* It is a circular buffer. See if the buffer has wrapped around.
* If yes, it will have to be transferred in two separate DMA
* transfers.
*/
if (start + up->uart_dma.tx_buf_size >=
up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
up->uart_dma.tx_buf_size =
(up->uart_dma.tx_buf_dma_phys +
UART_XMIT_SIZE) - start;
omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
OMAP_DMA_AMODE_CONSTANT,
up->uart_dma.uart_base, 0, 0);
omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
OMAP_DMA_AMODE_POST_INC, start, 0, 0);
omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
OMAP_DMA_DATA_TYPE_S8,
up->uart_dma.tx_buf_size, 1,
OMAP_DMA_SYNC_ELEMENT,
up->uart_dma.uart_dma_tx, 0);
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.tx_dma_channel);
}
static unsigned int check_modem_status(struct uart_omap_port *up)
{
unsigned int status;
status = serial_in(up, UART_MSR);
status |= up->msr_saved_flags;
up->msr_saved_flags = 0;
if ((status & UART_MSR_ANY_DELTA) == 0)
return status;
if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
up->port.state != NULL) {
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change
(&up->port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change
(&up->port, status & UART_MSR_CTS);
wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
return status;
}
/**
* serial_omap_irq() - This handles the interrupt from one port
* @irq: uart port irq number
* @dev_id: uart port info
*/
static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
{
struct uart_omap_port *up = dev_id;
unsigned int iir, lsr;
unsigned long flags;
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
if (iir & UART_IIR_RLSI) {
if (!up->use_dma) {
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
} else {
up->ier &= ~(UART_IER_RDI | UART_IER_RLSI);
serial_out(up, UART_IER, up->ier);
if ((serial_omap_start_rxdma(up) != 0) &&
(lsr & UART_LSR_DR))
receive_chars(up, &lsr);
}
}
check_modem_status(up);
if ((lsr & UART_LSR_THRE) && (iir & UART_IIR_THRI))
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
up->port_activity = jiffies;
return IRQ_HANDLED;
}
static unsigned int serial_omap_tx_empty(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
unsigned int ret = 0;
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char status;
unsigned int ret = 0;
status = check_modem_status(up);
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char mcr = 0;
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
mcr |= up->mcr;
serial_out(up, UART_MCR, mcr);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int serial_omap_startup(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
int retval;
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
up->name, up);
if (retval)
return retval;
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_omap_clear_fifos(up);
/* For Hardware flow control */
serial_out(up, UART_MCR, UART_MCR_RTS);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
/*
* Most PC uarts need OUT2 raised to enable interrupts.
*/
up->port.mctrl |= TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
up->msr_saved_flags = 0;
if (up->use_dma) {
free_page((unsigned long)up->port.state->xmit.buf);
up->port.state->xmit.buf = dma_alloc_coherent(NULL,
UART_XMIT_SIZE,
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
0);
init_timer(&(up->uart_dma.rx_timer));
up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
up->uart_dma.rx_timer.data = up->pdev->id;
/* Currently the buffer size is 4KB. Can increase it */
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
up->uart_dma.rx_buf_size,
(dma_addr_t *)&(up->uart_dma.rx_buf_dma_phys), 0);
}
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
up->port_activity = jiffies;
return 0;
}
static void serial_omap_shutdown(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_omap_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_omap_clear_fifos(up);
/*
* Read data port to reset things, and then free the irq
*/
if (serial_in(up, UART_LSR) & UART_LSR_DR)
(void) serial_in(up, UART_RX);
if (up->use_dma) {
dma_free_coherent(up->port.dev,
UART_XMIT_SIZE, up->port.state->xmit.buf,
up->uart_dma.tx_buf_dma_phys);
up->port.state->xmit.buf = NULL;
serial_omap_stop_rx(port);
dma_free_coherent(up->port.dev,
up->uart_dma.rx_buf_size, up->uart_dma.rx_buf,
up->uart_dma.rx_buf_dma_phys);
up->uart_dma.rx_buf = NULL;
}
free_irq(up->port.irq, up);
}
static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
unsigned char efr = 0;
up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr & ~UART_EFR_ECB);
serial_out(up, UART_XON1, termios->c_cc[VSTART]);
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* clear SW control mode bits */
efr = up->efr;
efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
* Enable XON/XOFF flow control on output.
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
efr |= OMAP_UART_SW_TX;
/*
* IXOFF Flag:
* Enable XON/XOFF flow control on input.
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
efr |= OMAP_UART_SW_RX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR);
/*
* IXANY Flag:
* Enable any character to restart output.
* Operation resumes after receiving any
* character after recognition of the XOFF character
*/
if (termios->c_iflag & IXANY)
up->mcr |= UART_MCR_XONANY;
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
/* Enable special char function UARTi.EFR_REG[5] and
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
serial_out(up, UART_EFR, efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
serial_out(up, UART_LCR, up->lcr);
}
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char cval = 0;
unsigned char efr = 0;
unsigned long flags = 0;
unsigned int baud, quot;
switch (termios->c_cflag & CSIZE) {
case CS5:
cval = UART_LCR_WLEN5;
break;
case CS6:
cval = UART_LCR_WLEN6;
break;
case CS7:
cval = UART_LCR_WLEN7;
break;
default:
case CS8:
cval = UART_LCR_WLEN8;
break;
}
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
up->fcr |= UART_FCR_DMA_SELECT;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* Modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
/* FIFOs and DMA Settings */
/* FCR can be changed only when the
* baud clock is not running, i.e.
* DLL_REG and DLH_REG are set to 0.
*/
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_DLL, 0);
serial_out(up, UART_DLM, 0);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
if (up->use_dma) {
serial_out(up, UART_TI752_TLR, 0);
serial_out(up, UART_OMAP_SCR,
(UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
}
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
/* Protocol, Baud Rate, and Interrupt Settings */
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, cval);
if (baud > 230400 && baud != 3000000)
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
/* Hardware Flow Control Configuration */
if (termios->c_cflag & CRTSCTS) {
efr |= (UART_EFR_CTS | UART_EFR_RTS);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
up->mcr = serial_in(up, UART_MCR);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
serial_out(up, UART_LCR, cval);
}
serial_omap_set_mctrl(&up->port, up->port.mctrl);
/* Software Flow Control Configuration */
serial_omap_configure_xonxoff(up, termios);
spin_unlock_irqrestore(&up->port.lock, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
}
static void
serial_omap_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
}
static void serial_omap_release_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_release_port+\n");
}
static int serial_omap_request_port(struct uart_port *port)
{
dev_dbg(port->dev, "serial_omap_request_port+\n");
return 0;
}
static void serial_omap_config_port(struct uart_port *port, int flags)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
up->pdev->id);
up->port.type = PORT_OMAP;
}
static int
serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
dev_dbg(port->dev, "serial_omap_verify_port+\n");
return -EINVAL;
}
static const char *
serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
return up->name;
}
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
static inline void wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
if (msr & UART_MSR_CTS)
break;
udelay(1);
}
}
}
#ifdef CONFIG_CONSOLE_POLL
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned int status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;
return serial_in(up, UART_RX);
}
#endif /* CONFIG_CONSOLE_POLL */
#ifdef CONFIG_SERIAL_OMAP_CONSOLE
static struct uart_omap_port *serial_omap_console_ports[4];
static struct uart_driver serial_omap_reg;
static void serial_omap_console_putchar(struct uart_port *port, int ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
}
static void
serial_omap_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_omap_port *up = serial_omap_console_ports[co->index];
unsigned long flags;
unsigned int ier;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&up->port.lock);
else
spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
uart_console_write(&up->port, s, count, serial_omap_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
/*
* The receive handling will happen properly because the
* receive ready bit will still be set; it is not cleared
* on read. However, modem status handling will not, so we
* must call check_modem_status() ourselves if we have saved
* something in the saved flags while processing with
* interrupts off.
*/
if (up->msr_saved_flags)
check_modem_status(up);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static int __init
serial_omap_console_setup(struct console *co, char *options)
{
struct uart_omap_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (serial_omap_console_ports[co->index] == NULL)
return -ENODEV;
up = serial_omap_console_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_omap_console = {
.name = OMAP_SERIAL_NAME,
.write = serial_omap_console_write,
.device = uart_console_device,
.setup = serial_omap_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_omap_reg,
};
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
serial_omap_console_ports[up->pdev->id] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
#else
#define OMAP_CONSOLE NULL
static inline void serial_omap_add_console_port(struct uart_omap_port *up)
{}
#endif
static struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
.get_mctrl = serial_omap_get_mctrl,
.stop_tx = serial_omap_stop_tx,
.start_tx = serial_omap_start_tx,
.stop_rx = serial_omap_stop_rx,
.enable_ms = serial_omap_enable_ms,
.break_ctl = serial_omap_break_ctl,
.startup = serial_omap_startup,
.shutdown = serial_omap_shutdown,
.set_termios = serial_omap_set_termios,
.pm = serial_omap_pm,
.type = serial_omap_type,
.release_port = serial_omap_release_port,
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
#endif
};
static struct uart_driver serial_omap_reg = {
.owner = THIS_MODULE,
.driver_name = "OMAP-SERIAL",
.dev_name = OMAP_SERIAL_NAME,
.nr = OMAP_MAX_HSUART_PORTS,
.cons = OMAP_CONSOLE,
};
static int
serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
{
struct uart_omap_port *up = platform_get_drvdata(pdev);
if (up)
uart_suspend_port(&serial_omap_reg, &up->port);
return 0;
}
static int serial_omap_resume(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
if (up)
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
static void serial_omap_rx_timeout(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
int ret = 0;
curr_dma_pos = omap_get_dma_dst_pos(up->uart_dma.rx_dma_channel);
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
RX_TIMEOUT) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
serial_out(up, UART_IER, up->ier);
}
return;
}
curr_transmitted_size = curr_dma_pos -
up->uart_dma.prev_rx_dma_pos;
up->port.icount.rx += curr_transmitted_size;
tty_insert_flip_string(up->port.state->port.tty,
up->uart_dma.rx_buf +
(up->uart_dma.prev_rx_dma_pos -
up->uart_dma.rx_buf_dma_phys),
curr_transmitted_size);
tty_flip_buffer_push(up->port.state->port.tty);
up->uart_dma.prev_rx_dma_pos = curr_dma_pos;
if (up->uart_dma.rx_buf_size +
up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
ret = serial_omap_start_rxdma(up);
if (ret < 0) {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
serial_out(up, UART_IER, up->ier);
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
}
up->port_activity = jiffies;
}
static void uart_rx_dma_callback(int lch, u16 ch_status, void *data)
{
return;
}
static int serial_omap_start_rxdma(struct uart_omap_port *up)
{
int ret = 0;
if (up->uart_dma.rx_dma_channel == -1) {
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
&(up->uart_dma.rx_dma_channel));
if (ret < 0)
return ret;
omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
OMAP_DMA_AMODE_CONSTANT,
up->uart_dma.uart_base, 0, 0);
omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
OMAP_DMA_AMODE_POST_INC,
up->uart_dma.rx_buf_dma_phys, 0, 0);
omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
OMAP_DMA_DATA_TYPE_S8,
up->uart_dma.rx_buf_size, 1,
OMAP_DMA_SYNC_ELEMENT,
up->uart_dma.uart_dma_rx, 0);
}
up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
up->uart_dma.rx_dma_used = true;
return ret;
}
static void serial_omap_continue_tx(struct uart_omap_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
unsigned int start = up->uart_dma.tx_buf_dma_phys
+ (xmit->tail & (UART_XMIT_SIZE - 1));
if (uart_circ_empty(xmit))
return;
up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
/*
* It is a circular buffer. See if the buffer has wrapped around.
* If yes, it will have to be transferred in two separate dma
* transfers.
*/
if (start + up->uart_dma.tx_buf_size >=
up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
up->uart_dma.tx_buf_size =
(up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start;
omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
OMAP_DMA_AMODE_CONSTANT,
up->uart_dma.uart_base, 0, 0);
omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
OMAP_DMA_AMODE_POST_INC, start, 0, 0);
omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
OMAP_DMA_DATA_TYPE_S8,
up->uart_dma.tx_buf_size, 1,
OMAP_DMA_SYNC_ELEMENT,
up->uart_dma.uart_dma_tx, 0);
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.tx_dma_channel);
}
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
{
struct uart_omap_port *up = (struct uart_omap_port *)data;
struct circ_buf *xmit = &up->port.state->xmit;
xmit->tail = (xmit->tail + up->uart_dma.tx_buf_size) &
(UART_XMIT_SIZE - 1);
up->port.icount.tx += up->uart_dma.tx_buf_size;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (uart_circ_empty(xmit)) {
spin_lock(&(up->uart_dma.tx_lock));
serial_omap_stop_tx(&up->port);
up->uart_dma.tx_dma_used = false;
spin_unlock(&(up->uart_dma.tx_lock));
} else {
omap_stop_dma(up->uart_dma.tx_dma_channel);
serial_omap_continue_tx(up);
}
up->port_activity = jiffies;
return;
}
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
struct resource *mem, *irq, *dma_tx, *dma_rx;
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret = -ENOSPC;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
return -ENODEV;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "no irq resource?\n");
return -ENODEV;
}
if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
return -EBUSY;
}
dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!dma_rx) {
ret = -EINVAL;
goto err;
}
dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
if (!dma_tx) {
ret = -EINVAL;
goto err;
}
up = kzalloc(sizeof(*up), GFP_KERNEL);
if (up == NULL) {
ret = -ENOMEM;
goto do_release_region;
}
sprintf(up->name, "OMAP UART%d", pdev->id);
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
up->port.iotype = UPIO_MEM;
up->port.irq = irq->start;
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
up->port.line = pdev->id;
up->port.membase = omap_up_info->membase;
up->port.mapbase = omap_up_info->mapbase;
up->port.flags = omap_up_info->flags;
up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
up->uart_dma.uart_base = mem->start;
if (omap_up_info->dma_enabled) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
up->uart_dma.rx_buf_size = 4096;
up->uart_dma.rx_timeout = 2;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
ui[pdev->id] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto do_release_region;
platform_set_drvdata(pdev, up);
return 0;
err:
dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
pdev->id, __func__, ret);
do_release_region:
release_mem_region(mem->start, (mem->end - mem->start) + 1);
return ret;
}
static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
platform_set_drvdata(dev, NULL);
if (up) {
uart_remove_one_port(&serial_omap_reg, &up->port);
kfree(up);
}
return 0;
}
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
.suspend = serial_omap_suspend,
.resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
},
};
static int __init serial_omap_init(void)
{
int ret;
ret = uart_register_driver(&serial_omap_reg);
if (ret != 0)
return ret;
ret = platform_driver_register(&serial_omap_driver);
if (ret != 0)
uart_unregister_driver(&serial_omap_reg);
return ret;
}
static void __exit serial_omap_exit(void)
{
platform_driver_unregister(&serial_omap_driver);
uart_unregister_driver(&serial_omap_reg);
}
module_init(serial_omap_init);
module_exit(serial_omap_exit);
MODULE_DESCRIPTION("OMAP High Speed UART driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
| gpl-2.0 |
ASAZING/android_kernel_lanix_l900 | arch/arm/mach-omap2/devices.c | 2040 | 15314 | /*
* linux/arch/arm/mach-omap2/devices.c
*
* OMAP2 platform device setup/initialization
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
#include <linux/platform_data/omap_ocp2scp.h>
#include <linux/usb/omap_control_usb.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <linux/omap-dma.h>
#include "iomap.h"
#include "omap_hwmod.h"
#include "omap_device.h"
#include "omap4-keypad.h"
#include "soc.h"
#include "common.h"
#include "mux.h"
#include "control.h"
#include "devices.h"
#include "dma.h"
#define L3_MODULES_MAX_LEN 12
#define L3_MODULES 3
static int __init omap3_l3_init(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
char oh_name[L3_MODULES_MAX_LEN];
/*
* To avoid code running on other OMAPs in
* multi-omap builds
*/
if (!(cpu_is_omap34xx()))
return -ENODEV;
snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main");
oh = omap_hwmod_lookup(oh_name);
if (!oh)
pr_err("could not look up %s\n", oh_name);
pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0);
WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
omap_postcore_initcall(omap3_l3_init);
static int __init omap4_l3_init(void)
{
int i;
struct omap_hwmod *oh[3];
struct platform_device *pdev;
char oh_name[L3_MODULES_MAX_LEN];
/* If dtb is there, the devices will be created dynamically */
if (of_have_populated_dt())
return -ENODEV;
/*
* To avoid code running on other OMAPs in
* multi-omap builds
*/
if (!cpu_is_omap44xx() && !soc_is_omap54xx())
return -ENODEV;
for (i = 0; i < L3_MODULES; i++) {
snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main_%d", i+1);
oh[i] = omap_hwmod_lookup(oh_name);
if (!(oh[i]))
pr_err("could not look up %s\n", oh_name);
}
pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, 0);
WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
omap_postcore_initcall(omap4_l3_init);
#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
static struct resource omap2cam_resources[] = {
{
.start = OMAP24XX_CAMERA_BASE,
.end = OMAP24XX_CAMERA_BASE + 0xfff,
.flags = IORESOURCE_MEM,
},
{
.start = 24 + OMAP_INTC_START,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device omap2cam_device = {
.name = "omap24xxcam",
.id = -1,
.num_resources = ARRAY_SIZE(omap2cam_resources),
.resource = omap2cam_resources,
};
#endif
#if defined(CONFIG_IOMMU_API)
#include <linux/platform_data/iommu-omap.h>
static struct resource omap3isp_resources[] = {
{
.start = OMAP3430_ISP_BASE,
.end = OMAP3430_ISP_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_CCP2_BASE,
.end = OMAP3430_ISP_CCP2_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_CCDC_BASE,
.end = OMAP3430_ISP_CCDC_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_HIST_BASE,
.end = OMAP3430_ISP_HIST_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_H3A_BASE,
.end = OMAP3430_ISP_H3A_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_PREV_BASE,
.end = OMAP3430_ISP_PREV_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_RESZ_BASE,
.end = OMAP3430_ISP_RESZ_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_SBL_BASE,
.end = OMAP3430_ISP_SBL_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_CSI2A_REGS1_BASE,
.end = OMAP3430_ISP_CSI2A_REGS1_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3430_ISP_CSIPHY2_BASE,
.end = OMAP3430_ISP_CSIPHY2_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3630_ISP_CSI2A_REGS2_BASE,
.end = OMAP3630_ISP_CSI2A_REGS2_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3630_ISP_CSI2C_REGS1_BASE,
.end = OMAP3630_ISP_CSI2C_REGS1_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3630_ISP_CSIPHY1_BASE,
.end = OMAP3630_ISP_CSIPHY1_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP3630_ISP_CSI2C_REGS2_BASE,
.end = OMAP3630_ISP_CSI2C_REGS2_END,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE,
.end = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE + 3,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL,
.end = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL + 3,
.flags = IORESOURCE_MEM,
},
{
.start = 24 + OMAP_INTC_START,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device omap3isp_device = {
.name = "omap3isp",
.id = -1,
.num_resources = ARRAY_SIZE(omap3isp_resources),
.resource = omap3isp_resources,
};
static struct omap_iommu_arch_data omap3_isp_iommu = {
.name = "mmu_isp",
};
int omap3_init_camera(struct isp_platform_data *pdata)
{
omap3isp_device.dev.platform_data = pdata;
omap3isp_device.dev.archdata.iommu = &omap3_isp_iommu;
return platform_device_register(&omap3isp_device);
}
#else /* !CONFIG_IOMMU_API */
int omap3_init_camera(struct isp_platform_data *pdata)
{
return 0;
}
#endif
static inline void omap_init_camera(void)
{
#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
if (cpu_is_omap24xx())
platform_device_register(&omap2cam_device);
#endif
}
#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB)
static struct omap_control_usb_platform_data omap4_control_usb_pdata = {
.type = 1,
};
struct resource omap4_control_usb_res[] = {
{
.name = "control_dev_conf",
.start = 0x4a002300,
.end = 0x4a002303,
.flags = IORESOURCE_MEM,
},
{
.name = "otghs_control",
.start = 0x4a00233c,
.end = 0x4a00233f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap4_control_usb = {
.name = "omap-control-usb",
.id = -1,
.dev = {
.platform_data = &omap4_control_usb_pdata,
},
.num_resources = 2,
.resource = omap4_control_usb_res,
};
static inline void __init omap_init_control_usb(void)
{
if (!cpu_is_omap44xx())
return;
if (platform_device_register(&omap4_control_usb))
pr_err("Error registering omap_control_usb device\n");
}
#else
static inline void omap_init_control_usb(void) { }
#endif /* CONFIG_OMAP_CONTROL_USB */
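/* Build and register the omap4-keypad omap_device from the "kbd" hwmod and apply the board's keypad pad mux settings. */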
int __init omap4_keyboard_init(struct omap4_keypad_platform_data
*sdp4430_keypad_data, struct omap_board_data *bdata)
{
struct platform_device *pdev;
struct omap_hwmod *oh;
struct omap4_keypad_platform_data *keypad_data;
unsigned int id = -1;
char *oh_name = "kbd";
char *name = "omap4-keypad";
oh = omap_hwmod_lookup(oh_name);
if (!oh) {
pr_err("Could not look up %s\n", oh_name);
return -ENODEV;
}
keypad_data = sdp4430_keypad_data;
pdev = omap_device_build(name, id, oh, keypad_data,
sizeof(struct omap4_keypad_platform_data));
if (IS_ERR(pdev)) {
WARN(1, "Can't build omap_device for %s:%s.\n",
name, oh->name);
return PTR_ERR(pdev);
}
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
return 0;
}
#if defined(CONFIG_OMAP_MBOX_FWK) || defined(CONFIG_OMAP_MBOX_FWK_MODULE)
static inline void __init omap_init_mbox(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("mailbox");
if (!oh) {
pr_err("%s: unable to find hwmod\n", __func__);
return;
}
pdev = omap_device_build("omap-mailbox", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",
__func__, PTR_ERR(pdev));
}
#else
static inline void omap_init_mbox(void) { }
#endif /* CONFIG_OMAP_MBOX_FWK */
static inline void omap_init_sti(void) {}
#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
static struct platform_device omap_pcm = {
.name = "omap-pcm-audio",
.id = -1,
};
static void omap_init_audio(void)
{
platform_device_register(&omap_pcm);
}
#else
static inline void omap_init_audio(void) {}
#endif
#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
static void __init omap_init_mcpdm(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("mcpdm");
if (!oh) {
printk(KERN_ERR "Could not look up mcpdm hw_mod\n");
return;
}
pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "Can't build omap_device for omap-mcpdm.\n");
}
#else
static inline void omap_init_mcpdm(void) {}
#endif
#if defined(CONFIG_SND_OMAP_SOC_DMIC) || \
defined(CONFIG_SND_OMAP_SOC_DMIC_MODULE)
static void __init omap_init_dmic(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("dmic");
if (!oh) {
pr_err("Could not look up dmic hw_mod\n");
return;
}
pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n");
}
#else
static inline void omap_init_dmic(void) {}
#endif
#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
static struct platform_device omap_hdmi_audio = {
.name = "omap-hdmi-audio",
.id = -1,
};
static void __init omap_init_hdmi_audio(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("dss_hdmi");
if (!oh) {
printk(KERN_ERR "Could not look up dss_hdmi hw_mod\n");
return;
}
pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
WARN(IS_ERR(pdev),
"Can't build omap_device for omap-hdmi-audio-dai.\n");
platform_device_register(&omap_hdmi_audio);
}
#else
static inline void omap_init_hdmi_audio(void) {}
#endif
#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
#include <linux/platform_data/spi-omap2-mcspi.h>
static int __init omap_mcspi_init(struct omap_hwmod *oh, void *unused)
{
struct platform_device *pdev;
char *name = "omap2_mcspi";
struct omap2_mcspi_platform_config *pdata;
static int spi_num;
struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr;
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
pr_err("Memory allocation for McSPI device failed\n");
return -ENOMEM;
}
pdata->num_cs = mcspi_attrib->num_chipselect;
switch (oh->class->rev) {
case OMAP2_MCSPI_REV:
case OMAP3_MCSPI_REV:
pdata->regs_offset = 0;
break;
case OMAP4_MCSPI_REV:
pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET;
break;
default:
pr_err("Invalid McSPI Revision value\n");
kfree(pdata);
return -EINVAL;
}
spi_num++;
pdev = omap_device_build(name, spi_num, oh, pdata, sizeof(*pdata));
WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s\n",
name, oh->name);
kfree(pdata);
return 0;
}
static void omap_init_mcspi(void)
{
omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL);
}
#else
static inline void omap_init_mcspi(void) {}
#endif
/**
* omap_init_rng - bind the RNG hwmod to the RNG omap_device
*
* Bind the RNG hwmod to the RNG omap_device. No return value.
*/
static void omap_init_rng(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("rng");
if (!oh)
return;
pdev = omap_device_build("omap_rng", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "Can't build omap_device for omap_rng\n");
}
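/* Bind the "sham" (SHA/MD5 accelerator) hwmod to the omap-sham device. */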
static void __init omap_init_sham(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("sham");
if (!oh)
return;
pdev = omap_device_build("omap-sham", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "Can't build omap_device for omap-sham\n");
}
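/* Bind the "aes" (AES crypto accelerator) hwmod to the omap-aes device. */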
static void __init omap_init_aes(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
oh = omap_hwmod_lookup("aes");
if (!oh)
return;
pdev = omap_device_build("omap-aes", -1, oh, NULL, 0);
WARN(IS_ERR(pdev), "Can't build omap_device for omap-aes\n");
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
};
#else
static struct resource omap_vout_resource[2] = {
};
#endif
static struct platform_device omap_vout_device = {
.name = "omap_vout",
.num_resources = ARRAY_SIZE(omap_vout_resource),
.resource = &omap_vout_resource[0],
.id = -1,
};
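/* Register the omap_vout video output device. */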
static void omap_init_vout(void)
{
if (platform_device_register(&omap_vout_device) < 0)
printk(KERN_ERR "Unable to register OMAP-VOUT device\n");
}
#else
static inline void omap_init_vout(void) {}
#endif
#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
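/* Count entries in an ocp2scp device array terminated by a NULL drv_name. */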
static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
{
int cnt = 0;
while (ocp2scp_dev->drv_name != NULL) {
cnt++;
ocp2scp_dev++;
}
return cnt;
}
static void __init omap_init_ocp2scp(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
int bus_id = -1, dev_cnt = 0, i;
struct omap_ocp2scp_dev *ocp2scp_dev;
const char *oh_name, *name;
struct omap_ocp2scp_platform_data *pdata;
if (!cpu_is_omap44xx())
return;
oh_name = "ocp2scp_usb_phy";
name = "omap-ocp2scp";
oh = omap_hwmod_lookup(oh_name);
if (!oh) {
pr_err("%s: could not find omap_hwmod for %s\n", __func__,
oh_name);
return;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
pr_err("%s: No memory for ocp2scp pdata\n", __func__);
return;
}
ocp2scp_dev = oh->dev_attr;
dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
if (!dev_cnt) {
pr_err("%s: No devices connected to ocp2scp\n", __func__);
kfree(pdata);
return;
}
pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
* dev_cnt, GFP_KERNEL);
if (!pdata->devices) {
pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
kfree(pdata);
return;
}
for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
pdata->devices[i] = ocp2scp_dev;
pdata->dev_cnt = dev_cnt;
pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata));
if (IS_ERR(pdev)) {
pr_err("Could not build omap_device for %s %s\n",
name, oh_name);
kfree(pdata->devices);
kfree(pdata);
return;
}
}
#else
static inline void omap_init_ocp2scp(void) { }
#endif
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
{
/* Enable dummy states for those platforms without pinctrl support */
if (!of_have_populated_dt())
pinctrl_provide_dummies();
/*
* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
omap_init_audio();
omap_init_camera();
omap_init_hdmi_audio();
omap_init_mbox();
/* If dtb is there, the devices will be created dynamically */
if (!of_have_populated_dt()) {
omap_init_control_usb();
omap_init_dmic();
omap_init_mcpdm();
omap_init_mcspi();
omap_init_sham();
omap_init_aes();
}
omap_init_sti();
omap_init_rng();
omap_init_vout();
omap_init_ocp2scp();
return 0;
}
omap_arch_initcall(omap2_init_devices);
| gpl-2.0 |
SM-G920P/arter-kernel | security/integrity/ima/ima_audit.c | 2296 | 1832 | /*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* File: integrity_audit.c
* Audit calls for the integrity subsystem
*/
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/audit.h>
#include "ima.h"
static int ima_audit;
/* ima_audit_setup - enable informational auditing messages */
static int __init ima_audit_setup(char *str)
{
unsigned long audit;
if (!strict_strtoul(str, 0, &audit))
ima_audit = audit ? 1 : 0;
return 1;
}
__setup("ima_audit=", ima_audit_setup);
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int audit_info)
{
struct audit_buffer *ab;
if (!ima_audit && audit_info == 1) /* Skip informational messages */
return;
ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
current->pid,
from_kuid(&init_user_ns, current_cred()->uid),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
audit_log_task_context(ab);
audit_log_format(ab, " op=");
audit_log_string(ab, op);
audit_log_format(ab, " cause=");
audit_log_string(ab, cause);
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, current->comm);
if (fname) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, fname);
}
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
audit_log_format(ab, " res=%d", !result);
audit_log_end(ab);
}
| gpl-2.0 |
LegacyHuawei/android_kernel_huawei_msm7x30 | drivers/net/ethernet/3com/3c501.c | 4856 | 23842 | /* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
/*
Written 1992,1993,1994 Donald Becker
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
This is a device driver for the 3Com Etherlink 3c501.
Do not purchase this card, even as a joke. Its performance is horrible,
and it breaks in many ways.
The original author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Fixed (again!) the missing interrupt locking on TX/RX shifting.
Alan Cox <alan@lxorguk.ukuu.org.uk>
Removed calls to init_etherdev since they are no longer needed, and
cleaned up modularization just a bit. The driver still allows only
the default address for cards when loaded as a module, but that's
really less braindead than anyone using a 3c501 board. :)
19950208 (invid@msen.com)
Added traps for interrupts hitting the window as we clear and TX load
the board. Now getting 150K/second FTP with a 3c501 card. Still playing
with a TX-TX optimisation to see if we can touch 180-200K/second as seems
theoretically maximum.
19950402 Alan Cox <alan@lxorguk.ukuu.org.uk>
Cleaned up for 2.3.x because we broke SMP now.
20000208 Alan Cox <alan@lxorguk.ukuu.org.uk>
Check up pass for 2.5. Nothing significant changed
20021009 Alan Cox <alan@lxorguk.ukuu.org.uk>
Fixed zero fill corner case
20030104 Alan Cox <alan@lxorguk.ukuu.org.uk>
For the avoidance of doubt the "preferred form" of this code is one which
is in an open non patent encumbered format. Where cryptographic key signing
forms part of the process of creating an executable the information
including keys needed to generate an equivalently functional executable
are deemed to be part of the source code.
*/
/**
* DOC: 3c501 Card Notes
*
* Some notes on this thing if you have to hack it. [Alan]
*
* Some documentation is available from 3Com. Due to the board's age,
* standard responses when you ask for this will range from 'be serious'
* to 'give it to a museum'. The documentation is incomplete and mostly
* of historical interest anyway.
*
* The basic system is a single buffer which can be used to receive or
* transmit a packet. A third command mode exists when you are setting
* things up.
*
* If it's transmitting it's not receiving and vice versa. In fact the
* time to get the board back into useful state after an operation is
* quite large.
*
* The driver works by keeping the board in receive mode waiting for a
* packet to arrive. When one arrives it is copied out of the buffer
* and delivered to the kernel. The card is reloaded and off we go.
*
* When transmitting lp->txing is set and the card is reset (from
* receive mode) [possibly losing a packet just received] to command
* mode. A packet is loaded and transmit mode triggered. The interrupt
* handler runs different code for transmit interrupts and can handle
* returning to receive mode or retransmissions (yes you have to help
* out with those too).
*
* DOC: Problems
*
* There are a wide variety of undocumented error returns from the card
* and you basically have to kick the board and pray if they turn up. Most
* only occur under extreme load or if you do something the board doesn't
* like (eg touching a register at the wrong time).
*
* The driver is less efficient than it could be. It switches through
* receive mode even if more transmits are queued. If this worries you buy
* a real Ethernet card.
*
* The combination of slow receive restart and no real multicast
* filter makes the board unusable with a kernel compiled for IP
* multicasting in a real multicast environment. That's down to the board,
* but even with no multicast programs running a multicast IP kernel is
* in group 224.0.0.1 and you will therefore be listening to all multicasts.
* One nv conference running over that Ethernet and you can give up.
*
*/
#define DRV_NAME "3c501"
#define DRV_VERSION "2002/10/09"
static const char version[] =
DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
/*
* Braindamage remaining:
* The 3c501 board.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include "3c501.h"
/*
* The boilerplate probe code.
*/
static int io = 0x280;
static int irq = 5;
static int mem_start;
/**
* el1_probe: - probe for a 3c501
* @dev: The device structure passed in to probe.
*
* This can be called from two places. The network layer will probe using
* a device structure passed in with the probe information completed. For a
* modular driver we use #init_module to fill in our own structure and probe
* for it.
*
* Returns the registered net_device on success, or an ERR_PTR(): -ENXIO if
* asked not to probe and -ENODEV if probing found no card.
*/
struct net_device * __init el1_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
static const unsigned ports[] = { 0x280, 0x300, 0};
const unsigned *port;
int err = 0;
if (!dev)
return ERR_PTR(-ENOMEM);
if (unit >= 0) {
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
io = dev->base_addr;
irq = dev->irq;
mem_start = dev->mem_start & 7;
}
if (io > 0x1ff) { /* Check a single specified location. */
err = el1_probe1(dev, io);
} else if (io != 0) {
err = -ENXIO; /* Don't probe at all. */
} else {
for (port = ports; *port && el1_probe1(dev, *port); port++)
;
if (!*port)
err = -ENODEV;
}
if (err)
goto out;
err = register_netdev(dev);
if (err)
goto out1;
return dev;
out1:
release_region(dev->base_addr, EL1_IO_EXTENT);
out:
free_netdev(dev);
return ERR_PTR(err);
}
static const struct net_device_ops el_netdev_ops = {
.ndo_open = el_open,
.ndo_stop = el1_close,
.ndo_start_xmit = el_start_xmit,
.ndo_tx_timeout = el_timeout,
.ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/**
* el1_probe1:
* @dev: The device structure to use
* @ioaddr: An I/O address to probe at.
*
* The actual probe. This is iterated over by #el1_probe in order to
* check all the applicable device locations.
*
* Returns 0 for a success, in which case the device is activated,
* EAGAIN if the IRQ is in use by another driver, and ENODEV if the
* board cannot be found.
*/
static int __init el1_probe1(struct net_device *dev, int ioaddr)
{
struct net_local *lp;
const char *mname; /* Vendor name */
unsigned char station_addr[6];
int autoirq = 0;
int i;
/*
* Reserve I/O resource for exclusive use by this driver
*/
if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
return -ENODEV;
/*
* Read the station address PROM data from the special port.
*/
for (i = 0; i < 6; i++) {
outw(i, ioaddr + EL1_DATAPTR);
station_addr[i] = inb(ioaddr + EL1_SAPROM);
}
/*
* Check the first three octets of the S.A. for 3Com's prefix, or
* for the Sager NP943 prefix.
*/
if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
station_addr[2] == 0x8c)
mname = "3c501";
else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
station_addr[2] == 0xC8)
mname = "NP943";
else {
release_region(ioaddr, EL1_IO_EXTENT);
return -ENODEV;
}
/*
* We auto-IRQ by shutting off the interrupt line and letting it
* float high.
*/
dev->irq = irq;
if (dev->irq < 2) {
unsigned long irq_mask;
irq_mask = probe_irq_on();
inb(RX_STATUS); /* Clear pending interrupts. */
inb(TX_STATUS);
outb(AX_LOOP + 1, AX_CMD);
outb(0x00, AX_CMD);
mdelay(20);
autoirq = probe_irq_off(irq_mask);
if (autoirq == 0) {
pr_warning("%s probe at %#x failed to detect IRQ line.\n",
mname, ioaddr);
release_region(ioaddr, EL1_IO_EXTENT);
return -EAGAIN;
}
}
outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
dev->base_addr = ioaddr;
memcpy(dev->dev_addr, station_addr, ETH_ALEN);
if (mem_start & 0xf)
el_debug = mem_start & 0x7;
if (autoirq)
dev->irq = autoirq;
pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
dev->name, mname, dev->base_addr,
autoirq ? "auto":"assigned ", dev->irq);
#ifdef CONFIG_IP_MULTICAST
pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
#endif
if (el_debug)
pr_debug("%s", version);
lp = netdev_priv(dev);
memset(lp, 0, sizeof(struct net_local));
spin_lock_init(&lp->lock);
/*
* The EL1-specific entries in the device structure.
*/
dev->netdev_ops = &el_netdev_ops;
dev->watchdog_timeo = HZ;
dev->ethtool_ops = &netdev_ethtool_ops;
return 0;
}
/**
* el1_open:
* @dev: device that is being opened
*
* When an ifconfig is issued which changes the device flags to include
* IFF_UP this function is called. It is only called when the change
* occurs, not when the interface remains up. #el1_close will be called
* when it goes down.
*
* Returns 0 for a successful open, or -EAGAIN if someone has run off
* with our interrupt line.
*/
static int el_open(struct net_device *dev)
{
int retval;
int ioaddr = dev->base_addr;
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
if (el_debug > 2)
pr_debug("%s: Doing el_open()...\n", dev->name);
retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
if (retval)
return retval;
spin_lock_irqsave(&lp->lock, flags);
el_reset(dev);
spin_unlock_irqrestore(&lp->lock, flags);
lp->txing = 0; /* Board in RX mode */
outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
netif_start_queue(dev);
return 0;
}
/**
* el_timeout:
* @dev: The 3c501 card that has timed out
*
* Attempt to restart the board. This is basically a mixture of extreme
* violence and prayer
*
*/
static void el_timeout(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
if (el_debug)
pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
dev->name, inb(TX_STATUS),
inb(AX_STATUS), inb(RX_STATUS));
dev->stats.tx_errors++;
outb(TX_NORM, TX_CMD);
outb(RX_NORM, RX_CMD);
outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
lp->txing = 0; /* Ripped back in to RX */
netif_wake_queue(dev);
}
/**
* el_start_xmit:
* @skb: The packet that is queued to be sent
* @dev: The 3c501 card we want to throw it down
*
* Attempt to send a packet to a 3c501 card. There are some interesting
* catches here because the 3c501 is an extremely old and therefore
* stupid piece of technology.
*
* If we are handling an interrupt on the other CPU we cannot load a packet
* as we may still be attempting to retrieve the last RX packet buffer.
*
* When a transmit times out we dump the card into control mode and just
* start again. It happens enough that it isn't worth logging.
*
* We avoid holding the spin locks when doing the packet load to the board.
* The device is very slow, and its DMA mode is even slower. If we held the
* lock while loading 1500 bytes onto the controller we would drop a lot of
* serial port characters. This requires we do extra locking, but we have
* no real choice.
*/
static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
unsigned long flags;
/*
* Avoid incoming interrupts between us flipping txing and flipping
* mode as the driver assumes txing is a faithful indicator of card
* state
*/
spin_lock_irqsave(&lp->lock, flags);
/*
* Avoid timer-based retransmission conflicts.
*/
netif_stop_queue(dev);
do {
int len = skb->len;
int pad = 0;
int gp_start;
unsigned char *buf = skb->data;
if (len < ETH_ZLEN)
pad = ETH_ZLEN - len;
gp_start = 0x800 - (len + pad);
lp->tx_pkt_start = gp_start;
lp->collisions = 0;
dev->stats.tx_bytes += skb->len;
/*
* Command mode with status cleared should [in theory]
* mean no more interrupts can be pending on the card.
*/
outb_p(AX_SYS, AX_CMD);
inb_p(RX_STATUS);
inb_p(TX_STATUS);
lp->loading = 1;
lp->txing = 1;
/*
* Turn interrupts back on while we spend a pleasant
* afternoon loading bytes into the board
*/
spin_unlock_irqrestore(&lp->lock, flags);
/* Set rx packet area to 0. */
outw(0x00, RX_BUF_CLR);
/* aim - packet will be loaded into buffer start */
outw(gp_start, GP_LOW);
/* load buffer (usual thing each byte increments the pointer) */
outsb(DATAPORT, buf, len);
if (pad) {
while (pad--) /* Zero fill buffer tail */
outb(0, DATAPORT);
}
/* the board reuses the same register */
outw(gp_start, GP_LOW);
if (lp->loading != 2) {
/* fire ... Trigger xmit. */
outb(AX_XMIT, AX_CMD);
lp->loading = 0;
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* A receive upset our load, despite our best efforts */
if (el_debug > 2)
pr_debug("%s: burped during tx load.\n", dev->name);
spin_lock_irqsave(&lp->lock, flags);
} while (1);
}
/**
* el_interrupt:
* @irq: Interrupt number
* @dev_id: The 3c501 that burped
*
* Handle the ether interface interrupts. The 3c501 needs a lot more
* hand holding than most cards. In particular we get a transmit interrupt
* with a collision error because the board firmware isn't capable of rewinding
* its own transmit buffer pointers. It can however count to 16 for us.
*
* On the receive side the card is also very dumb. It has no buffering to
* speak of. We simply pull the packet out of its PIO buffer (which is slow)
* and queue it for the kernel. Then we reset the card for the next packet.
*
* We sometimes get surprise interrupts late both because the SMP IRQ delivery
* is message passing and because the card sometimes seems to deliver late. I
* think if it is part way through a receive and the mode is changed it carries
* on receiving and sends us an interrupt. We have to band aid all these cases
* to get a sensible 150kBytes/second performance. Even then you want a small
* TCP window.
*/
static irqreturn_t el_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct net_local *lp;
int ioaddr;
int axsr; /* Aux. status reg. */
ioaddr = dev->base_addr;
lp = netdev_priv(dev);
spin_lock(&lp->lock);
/*
* What happened ?
*/
axsr = inb(AX_STATUS);
/*
* Log it
*/
if (el_debug > 3)
pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
if (lp->loading == 1 && !lp->txing)
pr_warning("%s: Inconsistent state loading while not in tx\n",
dev->name);
if (lp->txing) {
/*
* Board in transmit mode. May be loading. If we are
* loading we shouldn't have got this.
*/
int txsr = inb(TX_STATUS);
if (lp->loading == 1) {
if (el_debug > 2)
pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
/* Force a reload */
lp->loading = 2;
spin_unlock(&lp->lock);
goto out;
}
if (el_debug > 6)
pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
txsr, inw(GP_LOW), inw(RX_LOW));
if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
/*
* FIXME: is there a logic to whether to keep
* on trying or reset immediately ?
*/
if (el_debug > 1)
pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
dev->name, txsr, axsr,
inw(ioaddr + EL1_DATAPTR),
inw(ioaddr + EL1_RXPTR));
lp->txing = 0;
netif_wake_queue(dev);
} else if (txsr & TX_16COLLISIONS) {
/*
* Timed out
*/
if (el_debug)
pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
outb(AX_SYS, AX_CMD);
lp->txing = 0;
dev->stats.tx_aborted_errors++;
netif_wake_queue(dev);
} else if (txsr & TX_COLLISION) {
/*
* Retrigger xmit.
*/
if (el_debug > 6)
pr_debug("%s: retransmitting after a collision.\n", dev->name);
/*
* Poor little chip can't reset its own start
* pointer
*/
outb(AX_SYS, AX_CMD);
outw(lp->tx_pkt_start, GP_LOW);
outb(AX_XMIT, AX_CMD);
dev->stats.collisions++;
spin_unlock(&lp->lock);
goto out;
} else {
/*
* It worked.. we will now fall through and receive
*/
dev->stats.tx_packets++;
if (el_debug > 6)
pr_debug("%s: Tx succeeded %s\n", dev->name,
(txsr & TX_RDY) ? "." : "but tx is busy!");
/*
* This is safe the interrupt is atomic WRT itself.
*/
lp->txing = 0;
/* In case more to transmit */
netif_wake_queue(dev);
}
} else {
/*
* In receive mode.
*/
int rxsr = inb(RX_STATUS);
if (el_debug > 5)
pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
/*
* Just reading rx_status fixes most errors.
*/
if (rxsr & RX_MISSED)
dev->stats.rx_missed_errors++;
else if (rxsr & RX_RUNT) {
/* Handled to avoid board lock-up. */
dev->stats.rx_length_errors++;
if (el_debug > 5)
pr_debug("%s: runt.\n", dev->name);
} else if (rxsr & RX_GOOD) {
/*
* Receive worked.
*/
el_receive(dev);
} else {
/*
* Nothing? Something is broken!
*/
if (el_debug > 2)
pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
dev->name, rxsr);
el_reset(dev);
}
}
/*
* Move into receive mode
*/
outb(AX_RX, AX_CMD);
outw(0x00, RX_BUF_CLR);
inb(RX_STATUS); /* Be certain that interrupts are cleared. */
inb(TX_STATUS);
spin_unlock(&lp->lock);
out:
return IRQ_HANDLED;
}
/**
* el_receive:
* @dev: Device to pull the packets from
*
* We have a good packet. Well, not really "good", just mostly not broken.
* We must check everything to see if it is good. In particular we occasionally
* get wild packet sizes from the card. If the packet seems sane we PIO it
* off the card and queue it for the protocol layers.
*/
static void el_receive(struct net_device *dev)
{
int ioaddr = dev->base_addr;
int pkt_len;
struct sk_buff *skb;
pkt_len = inw(RX_LOW);
if (el_debug > 4)
pr_debug(" el_receive %d.\n", pkt_len);
if (pkt_len < 60 || pkt_len > 1536) {
if (el_debug)
pr_debug("%s: bogus packet, length=%d\n",
dev->name, pkt_len);
dev->stats.rx_over_errors++;
return;
}
/*
* Command mode so we can empty the buffer
*/
outb(AX_SYS, AX_CMD);
skb = netdev_alloc_skb(dev, pkt_len + 2);
/*
* Start of frame
*/
outw(0x00, GP_LOW);
if (skb == NULL) {
pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
return;
} else {
skb_reserve(skb, 2); /* Force 16 byte alignment */
/*
* The read increments through the bytes. The interrupt
* handler will fix the pointer when it returns to
* receive mode.
*/
insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
}
/**
* el_reset: Reset a 3c501 card
* @dev: The 3c501 card about to get zapped
*
* Even resetting a 3c501 isn't simple. When you activate reset it loses all
* its configuration. You must hold the lock when doing this. The function
* cannot take the lock itself as it is callable from the irq handler.
*/
static void el_reset(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
if (el_debug > 2)
pr_info("3c501 reset...\n");
outb(AX_RESET, AX_CMD); /* Reset the chip */
/* Aux control, irq and loopback enabled */
outb(AX_LOOP, AX_CMD);
{
int i;
for (i = 0; i < 6; i++) /* Set the station address. */
outb(dev->dev_addr[i], ioaddr + i);
}
outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
outb(RX_NORM, RX_CMD); /* Set Rx commands. */
inb(RX_STATUS); /* Clear status. */
inb(TX_STATUS);
lp->txing = 0;
}
/**
* el1_close:
* @dev: 3c501 card to shut down
*
* Close a 3c501 card. The IFF_UP flag has been cleared by the user via
* the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
* and then disable the interrupts. Finally we reset the chip. The effects
* of the rest will be cleaned up by #el1_open. Always returns 0 indicating
* a success.
*/
static int el1_close(struct net_device *dev)
{
int ioaddr = dev->base_addr;
if (el_debug > 2)
pr_info("%s: Shutting down Ethernet card at %#x.\n",
dev->name, ioaddr);
netif_stop_queue(dev);
/*
* Free and disable the IRQ.
*/
free_irq(dev->irq, dev);
outb(AX_RESET, AX_CMD); /* Reset the chip */
return 0;
}
/**
* set_multicast_list:
* @dev: The device to adjust
*
* Set or clear the multicast filter for this adaptor to use the best-effort
* filtering supported. The 3c501 supports only three modes of filtering.
* It always receives broadcasts and packets for itself. You can choose to
* optionally receive all packets, or all multicast packets on top of this.
*/
static void set_multicast_list(struct net_device *dev)
{
int ioaddr = dev->base_addr;
if (dev->flags & IFF_PROMISC) {
outb(RX_PROM, RX_CMD);
inb(RX_STATUS);
} else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
/* Multicast or all multicast is the same */
outb(RX_MULT, RX_CMD);
inb(RX_STATUS); /* Clear status. */
} else {
outb(RX_NORM, RX_CMD);
inb(RX_STATUS);
}
}
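/* ethtool hooks: report driver info and get/set the debug message level. */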
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
}
static u32 netdev_get_msglevel(struct net_device *dev)
{
return debug;
}
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
debug = level;
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
};
#ifdef MODULE
static struct net_device *dev_3c501;
module_param(io, int, 0);
module_param(irq, int, 0);
MODULE_PARM_DESC(io, "EtherLink I/O base address");
MODULE_PARM_DESC(irq, "EtherLink IRQ number");
/**
* init_module:
*
* When the driver is loaded as a module this function is called. We fake up
* a device structure with the base I/O and interrupt set as if it were being
* called from Space.c. This minimises the extra code that would otherwise
* be required.
*
* Returns 0 for success or -EIO if a card is not found. Returning an error
* here also causes the module to be unloaded
*/
int __init init_module(void)
{
dev_3c501 = el1_probe(-1);
if (IS_ERR(dev_3c501))
return PTR_ERR(dev_3c501);
return 0;
}
/**
* cleanup_module:
*
* The module is being unloaded. We unhook our network device from the system
* and then free up the resources we took when the card was found.
*/
void __exit cleanup_module(void)
{
struct net_device *dev = dev_3c501;
unregister_netdev(dev);
release_region(dev->base_addr, EL1_IO_EXTENT);
free_netdev(dev);
}
#endif /* MODULE */
MODULE_AUTHOR("Donald Becker, Alan Cox");
MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
MODULE_LICENSE("GPL");
| gpl-2.0 |
funky81/galaxy-2636 | net/ipx/ipx_route.c | 11768 | 6962 | /*
* Implements the IPX routing routines.
* Code moved from af_ipx.c.
*
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2003
*
* See net/ipx/ChangeLog.
*/
#include <linux/list.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ipx.h>
#include <net/sock.h>
LIST_HEAD(ipx_routes);
DEFINE_RWLOCK(ipx_routes_lock);
extern struct ipx_interface *ipx_internal_net;
extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy);
extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
char *node);
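/* Look up the route for a network; returns it with a reference held, or NULL. */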
struct ipx_route *ipxrtr_lookup(__be32 net)
{
struct ipx_route *r;
read_lock_bh(&ipx_routes_lock);
list_for_each_entry(r, &ipx_routes, node)
if (r->ir_net == net) {
ipxrtr_hold(r);
goto unlock;
}
r = NULL;
unlock:
read_unlock_bh(&ipx_routes_lock);
return r;
}
/*
* Caller must hold a reference to intrfc
*/
int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
unsigned char *node)
{
struct ipx_route *rt;
int rc;
/* Get a route structure; either existing or create */
rt = ipxrtr_lookup(network);
if (!rt) {
rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
rc = -EAGAIN;
if (!rt)
goto out;
atomic_set(&rt->refcnt, 1);
ipxrtr_hold(rt);
write_lock_bh(&ipx_routes_lock);
list_add(&rt->node, &ipx_routes);
write_unlock_bh(&ipx_routes_lock);
} else {
rc = -EEXIST;
if (intrfc == ipx_internal_net)
goto out_put;
}
rt->ir_net = network;
rt->ir_intrfc = intrfc;
if (!node) {
memset(rt->ir_router_node, '\0', IPX_NODE_LEN);
rt->ir_routed = 0;
} else {
memcpy(rt->ir_router_node, node, IPX_NODE_LEN);
rt->ir_routed = 1;
}
rc = 0;
out_put:
ipxrtr_put(rt);
out:
return rc;
}
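/* Remove and drop every route bound to the given interface. */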
void ipxrtr_del_routes(struct ipx_interface *intrfc)
{
struct ipx_route *r, *tmp;
write_lock_bh(&ipx_routes_lock);
list_for_each_entry_safe(r, tmp, &ipx_routes, node)
if (r->ir_intrfc == intrfc) {
list_del(&r->node);
ipxrtr_put(r);
}
write_unlock_bh(&ipx_routes_lock);
}
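/* Create a route from a user route definition, resolving the output interface by the router's network number. */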
static int ipxrtr_create(struct ipx_route_definition *rd)
{
struct ipx_interface *intrfc;
int rc = -ENETUNREACH;
/* Find the appropriate interface */
intrfc = ipxitf_find_using_net(rd->ipx_router_network);
if (!intrfc)
goto out;
rc = ipxrtr_add_route(rd->ipx_network, intrfc, rd->ipx_router_node);
ipxitf_put(intrfc);
out:
return rc;
}
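/* Delete the route for a network; directly connected routes cannot be removed. */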
static int ipxrtr_delete(__be32 net)
{
struct ipx_route *r, *tmp;
int rc;
write_lock_bh(&ipx_routes_lock);
list_for_each_entry_safe(r, tmp, &ipx_routes, node)
if (r->ir_net == net) {
/* Directly connected; can't lose route */
rc = -EPERM;
if (!r->ir_routed)
goto out;
list_del(&r->node);
ipxrtr_put(r);
rc = 0;
goto out;
}
rc = -ENOENT;
out:
write_unlock_bh(&ipx_routes_lock);
return rc;
}
/*
* The skb has to be unshared because we end up calling ipxitf_send, which
* will modify the packet
*/
int ipxrtr_route_skb(struct sk_buff *skb)
{
struct ipxhdr *ipx = ipx_hdr(skb);
struct ipx_route *r = ipxrtr_lookup(IPX_SKB_CB(skb)->ipx_dest_net);
if (!r) { /* no known route */
kfree_skb(skb);
return 0;
}
ipxitf_hold(r->ir_intrfc);
ipxitf_send(r->ir_intrfc, skb, r->ir_routed ?
r->ir_router_node : ipx->ipx_dest.node);
ipxitf_put(r->ir_intrfc);
ipxrtr_put(r);
return 0;
}
/*
* Route an outgoing frame from a socket.
*/
int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
struct iovec *iov, size_t len, int noblock)
{
struct sk_buff *skb;
struct ipx_sock *ipxs = ipx_sk(sk);
struct ipx_interface *intrfc;
struct ipxhdr *ipx;
size_t size;
int ipx_offset;
struct ipx_route *rt = NULL;
int rc;
/* Find the appropriate interface on which to send packet */
if (!usipx->sipx_network && ipx_primary_net) {
usipx->sipx_network = ipx_primary_net->if_netnum;
intrfc = ipx_primary_net;
} else {
rt = ipxrtr_lookup(usipx->sipx_network);
rc = -ENETUNREACH;
if (!rt)
goto out;
intrfc = rt->ir_intrfc;
}
ipxitf_hold(intrfc);
ipx_offset = intrfc->if_ipx_offset;
size = sizeof(struct ipxhdr) + len + ipx_offset;
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
if (!skb)
goto out_put;
skb_reserve(skb, ipx_offset);
skb->sk = sk;
/* Fill in IPX header */
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb_put(skb, sizeof(struct ipxhdr));
ipx = ipx_hdr(skb);
ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr));
IPX_SKB_CB(skb)->ipx_tctrl = 0;
ipx->ipx_type = usipx->sipx_type;
IPX_SKB_CB(skb)->last_hop.index = -1;
#ifdef CONFIG_IPX_INTERN
IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
memcpy(ipx->ipx_source.node, ipxs->node, IPX_NODE_LEN);
#else
rc = ntohs(ipxs->port);
if (rc == 0x453 || rc == 0x452) {
/* RIP/SAP special handling for mars_nwe */
IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum;
memcpy(ipx->ipx_source.node, intrfc->if_node, IPX_NODE_LEN);
} else {
IPX_SKB_CB(skb)->ipx_source_net = ipxs->intrfc->if_netnum;
memcpy(ipx->ipx_source.node, ipxs->intrfc->if_node,
IPX_NODE_LEN);
}
#endif /* CONFIG_IPX_INTERN */
ipx->ipx_source.sock = ipxs->port;
IPX_SKB_CB(skb)->ipx_dest_net = usipx->sipx_network;
memcpy(ipx->ipx_dest.node, usipx->sipx_node, IPX_NODE_LEN);
ipx->ipx_dest.sock = usipx->sipx_port;
rc = memcpy_fromiovec(skb_put(skb, len), iov, len);
if (rc) {
kfree_skb(skb);
goto out_put;
}
/* Apply checksum. Not allowed on 802.3 links. */
if (sk->sk_no_check || intrfc->if_dlink_type == htons(IPX_FRAME_8023))
ipx->ipx_checksum = htons(0xFFFF);
else
ipx->ipx_checksum = ipx_cksum(ipx, len + sizeof(struct ipxhdr));
rc = ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ?
rt->ir_router_node : ipx->ipx_dest.node);
out_put:
ipxitf_put(intrfc);
if (rt)
ipxrtr_put(rt);
out:
return rc;
}
/*
* We use a normal struct rtentry for route handling
*/
int ipxrtr_ioctl(unsigned int cmd, void __user *arg)
{
struct rtentry rt; /* Use these to behave like 'other' stacks */
struct sockaddr_ipx *sg, *st;
int rc = -EFAULT;
if (copy_from_user(&rt, arg, sizeof(rt)))
goto out;
sg = (struct sockaddr_ipx *)&rt.rt_gateway;
st = (struct sockaddr_ipx *)&rt.rt_dst;
rc = -EINVAL;
if (!(rt.rt_flags & RTF_GATEWAY) || /* Direct routes are fixed */
sg->sipx_family != AF_IPX ||
st->sipx_family != AF_IPX)
goto out;
switch (cmd) {
case SIOCDELRT:
rc = ipxrtr_delete(st->sipx_network);
break;
case SIOCADDRT: {
struct ipx_route_definition f;
f.ipx_network = st->sipx_network;
f.ipx_router_network = sg->sipx_network;
memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN);
rc = ipxrtr_create(&f);
break;
}
}
out:
return rc;
}
| gpl-2.0 |
pjh/linux-stable | drivers/uwb/ie-rcv.c | 12792 | 1632 | /*
* Ultra Wide Band
* IE Received notification handling.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/*
* Process an incoming IE Received notification.
*/
int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_ie_rcv *iercv;
size_t iesize;
/* Is there enough data to decode it? */
if (evt->notif.size < sizeof(*iercv)) {
dev_err(dev, "IE Received notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*iercv));
goto error;
}
iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb);
iesize = le16_to_cpu(iercv->wIELength);
dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]);
if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) {
dev_warn(dev, "unhandled Relinquish Request IE\n");
}
return 0;
error:
return result;
}
| gpl-2.0 |
magyarm/bluetooth-next | drivers/misc/sgi-xp/xpc_partition.c | 13304 | 14014 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) partition support.
*
* This is the part of XPC that detects the presence/absence of
* other partitions. It provides a heartbeat and monitors the
* heartbeats of other partitions.
*
*/
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include "xpc.h"
#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static unsigned long *xpc_part_nasids;
unsigned long *xpc_mach_nasids;
static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */
int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */
struct xpc_partition *xpc_partitions;
/*
* Guarantee that the kmalloc'd memory is cacheline aligned.
*/
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
/* see if kmalloc will give us cachline aligned memory by default */
*base = kmalloc(size, flags);
if (*base == NULL)
return NULL;
if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
return *base;
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kmalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL)
return NULL;
return (void *)L1_CACHE_ALIGN((u64)*base);
}
/*
* Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error.
*/
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
enum xp_retval ret;
u64 cookie = 0;
unsigned long rp_pa = nasid; /* seed with nasid */
size_t len = 0;
size_t buf_len = 0;
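/* the self-initialization of buf below only quiets a spurious "may be used uninitialized" compiler warning */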
void *buf = buf;
void *buf_base = NULL;
enum xp_retval (*get_partition_rsvd_page_pa)
(void *, u64 *, unsigned long *, size_t *) =
xpc_arch_ops.get_partition_rsvd_page_pa;
while (1) {
/* !!! rp_pa will need to be _gpa on UV.
* ??? So do we save it into the architecture specific parts
* ??? of the xpc_partition structure? Do we rename this
* ??? function or have two versions? Rename rp_pa for UV to
* ??? rp_gpa?
*/
ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
"address=0x%016lx, len=0x%016lx\n", ret,
(unsigned long)cookie, rp_pa, len);
if (ret != xpNeedMoreInfo)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
if (is_shub())
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
if (buf_base != NULL)
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
if (buf_base == NULL) {
dev_err(xpc_part, "unable to kmalloc "
"len=0x%016lx\n", buf_len);
ret = xpNoMemory;
break;
}
}
ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
}
}
kfree(buf_base);
if (ret != xpSuccess)
rp_pa = 0;
dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
return rp_pa;
}
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
* communications.
*/
int
xpc_setup_rsvd_page(void)
{
int ret;
struct xpc_rsvd_page *rp;
unsigned long rp_pa;
unsigned long new_ts_jiffies;
/* get the local reserved page's address */
preempt_disable();
rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
preempt_enable();
if (rp_pa == 0) {
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
rp->SAL_partid &= 0xff;
}
BUG_ON(rp->SAL_partid != xp_partition_id);
if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
dev_err(xpc_part, "the reserved page's partid of %d is outside "
"supported range (< 0 || >= %d)\n", rp->SAL_partid,
xp_max_npartitions);
return -EINVAL;
}
rp->version = XPC_RP_VERSION;
rp->max_npartitions = xp_max_npartitions;
/* establish the actual sizes of the nasid masks */
if (rp->SAL_version == 1) {
/* SAL_version 1 didn't set the nasids_size field */
rp->SAL_nasids_size = 128;
}
xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
BITS_PER_BYTE);
/* setup the pointers to the various items in the reserved page */
xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
ret = xpc_arch_ops.setup_rsvd_page(rp);
if (ret != 0)
return ret;
/*
* Set timestamp of when reserved page was setup by XPC.
* This signifies to the remote partition that our reserved
* page is initialized.
*/
new_ts_jiffies = jiffies;
if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
new_ts_jiffies++;
rp->ts_jiffies = new_ts_jiffies;
xpc_rsvd_page = rp;
return 0;
}
void
xpc_teardown_rsvd_page(void)
{
/* a zero timestamp indicates our rsvd page is not initialized */
xpc_rsvd_page->ts_jiffies = 0;
}
/*
* Get a copy of a portion of the remote partition's rsvd page.
*
* remote_rp points to a buffer that is cacheline aligned for BTE copies and
* is large enough to contain a copy of their reserved page header and
* part_nasids mask.
*/
enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{
int l;
enum xp_retval ret;
/* get the reserved page's physical address */
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
if (*remote_rp_pa == 0)
return xpNoRsvdPageAddr;
/* pull over the reserved page header and part_nasids mask */
ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
if (ret != xpSuccess)
return ret;
if (discovered_nasids != NULL) {
unsigned long *remote_part_nasids =
XPC_RP_PART_NASIDS(remote_rp);
for (l = 0; l < xpc_nasid_mask_nlongs; l++)
discovered_nasids[l] |= remote_part_nasids[l];
}
/* zero timestamp indicates the reserved page has not been setup */
if (remote_rp->ts_jiffies == 0)
return xpRsvdPageNotSet;
if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpBadVersion;
}
/* check that both remote and local partids are valid for each side */
if (remote_rp->SAL_partid < 0 ||
remote_rp->SAL_partid >= xp_max_npartitions ||
remote_rp->max_npartitions <= xp_partition_id) {
return xpInvalidPartid;
}
if (remote_rp->SAL_partid == xp_partition_id)
return xpLocalPartid;
return xpSuccess;
}
/*
* See if the other side has responded to a partition deactivate request
* from us. Though we requested the remote partition to deactivate with regard
* to us, we really only need to wait for the other side to disengage from us.
*/
int
xpc_partition_disengaged(struct xpc_partition *part)
{
short partid = XPC_PARTID(part);
int disengaged;
disengaged = !xpc_arch_ops.partition_engaged(partid);
if (part->disengage_timeout) {
if (!disengaged) {
if (time_is_after_jiffies(part->disengage_timeout)) {
/* timelimit hasn't been reached yet */
return 0;
}
/*
* Other side hasn't responded to our deactivate
* request in a timely fashion, so assume it's dead.
*/
dev_info(xpc_part, "deactivate request to remote "
"partition %d timed out\n", partid);
xpc_disengage_timedout = 1;
xpc_arch_ops.assume_partition_disengaged(partid);
disengaged = 1;
}
part->disengage_timeout = 0;
/* cancel the timer function, provided it's not us */
if (!in_interrupt())
del_singleshot_timer_sync(&part->disengage_timer);
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
part->act_state != XPC_P_AS_INACTIVE);
if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
xpc_arch_ops.cancel_partition_deactivation_request(part);
}
return disengaged;
}
/*
* Mark specified partition as active.
*/
enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
unsigned long irq_flags;
enum xp_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_AS_ACTIVATING) {
part->act_state = XPC_P_AS_ACTIVE;
ret = xpSuccess;
} else {
DBUG_ON(part->reason == xpSuccess);
ret = part->reason;
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return ret;
}
/*
* Start the process of deactivating the specified partition.
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
enum xp_retval reason)
{
unsigned long irq_flags;
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_AS_INACTIVE) {
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
xpc_arch_ops.request_partition_reactivation(part);
}
return;
}
if (part->act_state == XPC_P_AS_DEACTIVATING) {
if ((part->reason == xpUnloading && reason != xpUnloading) ||
reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return;
}
part->act_state = XPC_P_AS_DEACTIVATING;
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
/* ask remote partition to deactivate with regard to us */
xpc_arch_ops.request_partition_deactivation(part);
/* set a timelimit on the disengage phase of the deactivation request */
part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
part->disengage_timer.expires = part->disengage_timeout;
add_timer(&part->disengage_timer);
dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
XPC_PARTID(part), reason);
xpc_partition_going_down(part, reason);
}
/*
* Mark specified partition as inactive.
*/
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
unsigned long irq_flags;
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_AS_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
}
/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
* mask contains a bit for each even nasid in the entire machine.
*
* Using those two bit arrays, we can determine which nasids are
* known in the machine. Each should also have a reserved page
* initialized if they are available for partitioning.
*/
void
xpc_discovery(void)
{
void *remote_rp_base;
struct xpc_rsvd_page *remote_rp;
unsigned long remote_rp_pa;
int region;
int region_size;
int max_regions;
int nasid;
struct xpc_rsvd_page *rp;
unsigned long *discovered_nasids;
enum xp_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xpc_nasid_mask_nbytes,
GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL)
return;
discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
GFP_KERNEL);
if (discovered_nasids == NULL) {
kfree(remote_rp_base);
return;
}
rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
/*
* The term 'region' in this context refers to the minimum number of
* nodes that can comprise an access protection grouping. The access
 * protection is with regard to memory, IOI and IPI.
*/
region_size = xp_region_size;
if (is_uv())
max_regions = 256;
else {
max_regions = 64;
switch (region_size) {
case 128:
max_regions *= 2;
case 64:
max_regions *= 2;
case 32:
max_regions *= 2;
region_size = 16;
DBUG_ON(!is_shub2());
}
}
for (region = 0; region < max_regions; region++) {
if (xpc_exiting)
break;
dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * region_size * 2);
nasid < ((region + 1) * region_size * 2); nasid += 2) {
if (xpc_exiting)
break;
dev_dbg(xpc_part, "checking nasid %d\n", nasid);
if (test_bit(nasid / 2, xpc_part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping "
"region\n", nasid);
break;
}
if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
dev_dbg(xpc_part, "PROM indicates Nasid %d was "
"not on Numa-Link network at reset\n",
nasid);
continue;
}
if (test_bit(nasid / 2, discovered_nasids)) {
dev_dbg(xpc_part, "Nasid %d is part of a "
"partition which was previously "
"discovered\n", nasid);
continue;
}
/* pull over the rsvd page header & part_nasids mask */
ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rp_pa);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
ret);
if (ret == xpLocalPartid)
break;
continue;
}
xpc_arch_ops.request_partition_activation(remote_rp,
remote_rp_pa, nasid);
}
}
kfree(discovered_nasids);
kfree(remote_rp_base);
}
/*
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
*/
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
struct xpc_partition *part;
unsigned long part_nasid_pa;
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0)
return xpPartitionDown;
memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
xpc_nasid_mask_nbytes);
}
| gpl-2.0 |
myjang0507/Polaris | drivers/net/wireless/bcmdhd4358/dhd_linux_wq.c | 249 | 8808 | /*
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
* Copyright (C) 1999-2015, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>
struct dhd_deferred_event_t {
u8 event; /* holds the event */
void *event_data; /* Holds event specific data */
event_handler_t event_handler;
};
#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t)
struct dhd_deferred_wq {
struct work_struct deferred_work; /* should be the first member */
/*
* work events may occur simultaneously.
 * Can hold up to 64 low priority events and 4 high priority events
*/
#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t))
#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t))
struct kfifo *prio_fifo;
struct kfifo *work_fifo;
u8 *prio_fifo_buf;
u8 *work_fifo_buf;
spinlock_t work_lock;
	void *dhd_info; /* review: is this member still required? */
};
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
struct kfifo *fifo;
gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
fifo = kfifo_init(buf, size, flags, lock);
#else
fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
if (!fifo) {
return NULL;
}
kfifo_init(fifo, buf, size);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
return fifo;
}
static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
kfifo_free(fifo);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
/* FC11 releases the fifo memory */
kfree(fifo);
#endif
}
/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);
void*
dhd_deferred_work_init(void *dhd_info)
{
struct dhd_deferred_wq *work = NULL;
u8* buf;
unsigned long fifo_size = 0;
gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
if (!dhd_info) {
DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
goto return_null;
}
work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
flags);
if (!work) {
DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
goto return_null;
}
INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
/* initialize event fifo */
spin_lock_init(&work->work_lock);
/* allocate buffer to hold prio events */
fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
buf = (u8*)kzalloc(fifo_size, flags);
if (!buf) {
DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
goto return_null;
}
/* Initialize prio event fifo */
work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
if (!work->prio_fifo) {
kfree(buf);
goto return_null;
}
/* allocate buffer to hold work events */
fifo_size = DHD_WORK_FIFO_SIZE;
fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
buf = (u8*)kzalloc(fifo_size, flags);
if (!buf) {
DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
goto return_null;
}
/* Initialize event fifo */
work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
if (!work->work_fifo) {
kfree(buf);
goto return_null;
}
work->dhd_info = dhd_info;
DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
return work;
return_null:
if (work)
dhd_deferred_work_deinit(work);
return NULL;
}
void
dhd_deferred_work_deinit(void *work)
{
struct dhd_deferred_wq *deferred_work = work;
if (!deferred_work) {
DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__));
return;
}
/* cancel the deferred work handling */
cancel_work_sync((struct work_struct *)deferred_work);
/*
* free work event fifo.
* kfifo_free frees locally allocated fifo buffer
*/
if (deferred_work->prio_fifo)
dhd_kfifo_free(deferred_work->prio_fifo);
if (deferred_work->work_fifo)
dhd_kfifo_free(deferred_work->work_fifo);
kfree(deferred_work);
}
/*
* Prepares event to be queued
* Schedules the event
*/
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
event_handler_t event_handler, u8 priority)
{
struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq;
struct dhd_deferred_event_t deferred_event;
int status;
if (!deferred_wq) {
DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
ASSERT(0);
return DHD_WQ_STS_UNINITIALIZED;
}
if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
return DHD_WQ_STS_UNKNOWN_EVENT;
}
/*
* default element size is 1, which can be changed
* using kfifo_esize(). Older kernel(FC11) doesn't support
* changing element size. For compatibility changing
	 * element size is not preferred
*/
ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
deferred_event.event = event;
deferred_event.event_data = event_data;
deferred_event.event_handler = event_handler;
if (priority == DHD_WORK_PRIORITY_HIGH) {
status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
DEFRD_EVT_SIZE, &deferred_wq->work_lock);
} else {
status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
DEFRD_EVT_SIZE, &deferred_wq->work_lock);
}
if (!status) {
return DHD_WQ_STS_SCHED_FAILED;
}
schedule_work((struct work_struct *)deferred_wq);
return DHD_WQ_STS_OK;
}
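/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * driver). The handler signature below matches how dhd_deferred_work_handler()
 * invokes the registered handler; the event id DHD_WQ_WORK_EXAMPLE, the
 * priority constant DHD_WORK_PRIORITY_LOW and the wq pointer are
 * assumed/hypothetical names:
 *
 *	static void example_event_handler(void *dhd_info, void *event_data,
 *		u8 event)
 *	{
 *		// process event_data for this event id
 *	}
 *
 *	dhd_deferred_schedule_work(wq, data, DHD_WQ_WORK_EXAMPLE,
 *		example_event_handler, DHD_WORK_PRIORITY_LOW);
 */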
static int
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event)
{
int status = 0;
if (!deferred_wq) {
DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
return DHD_WQ_STS_UNINITIALIZED;
}
/*
* default element size is 1 byte, which can be changed
* using kfifo_esize(). Older kernel(FC11) doesn't support
* changing element size. For compatibility changing
	 * element size is not preferred
*/
ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
	/* first read the priority event fifo */
status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
DEFRD_EVT_SIZE, &deferred_wq->work_lock);
if (!status) {
/* priority fifo is empty. Now read low prio work fifo */
status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
DEFRD_EVT_SIZE, &deferred_wq->work_lock);
}
return status;
}
/*
* Called when work is scheduled
*/
static void
dhd_deferred_work_handler(struct work_struct *work)
{
struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
struct dhd_deferred_event_t work_event;
int status;
if (!deferred_work) {
DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
return;
}
do {
status = dhd_get_scheduled_work(deferred_work, &work_event);
DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
if (!status) {
DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
break;
}
if (work_event.event > DHD_MAX_WQ_EVENTS) {
DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
break;
}
if (work_event.event_handler) {
work_event.event_handler(deferred_work->dhd_info,
work_event.event_data, work_event.event);
} else {
DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
}
} while (1);
return;
}
| gpl-2.0 |
vasudev-33/secureModule | drivers/staging/comedi/drivers/ke_counter.c | 249 | 4467 | /*
comedi/drivers/ke_counter.c
Comedi driver for Kolter-Electronic PCI Counter 1 Card
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
Driver: ke_counter
Description: Driver for Kolter Electronic Counter Card
Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
Author: Michael Hillmann
Updated: Mon, 14 Apr 2008 15:42:42 +0100
Status: tested
Configuration Options: not applicable, uses PCI auto config
This is a simple driver to read the counter values from the
Kolter Electronic PCI Counter Card.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
#define CNT_CARD_DEVICE_ID 0x0014
/*-- counter write ----------------------------------------------------------*/
/* This should be used only for resetting the counters; maybe it is better
to make a special command 'reset'. */
static int cnt_winsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
outb((unsigned char)((data[0] >> 24) & 0xff),
dev->iobase + chan * 0x20 + 0x10);
outb((unsigned char)((data[0] >> 16) & 0xff),
dev->iobase + chan * 0x20 + 0x0c);
outb((unsigned char)((data[0] >> 8) & 0xff),
dev->iobase + chan * 0x20 + 0x08);
outb((unsigned char)((data[0] >> 0) & 0xff),
dev->iobase + chan * 0x20 + 0x04);
/* return the number of samples written */
return 1;
}
/*-- counter read -----------------------------------------------------------*/
static int cnt_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
unsigned char a0, a1, a2, a3, a4;
int chan = CR_CHAN(insn->chanspec);
int result;
a0 = inb(dev->iobase + chan * 0x20);
a1 = inb(dev->iobase + chan * 0x20 + 0x04);
a2 = inb(dev->iobase + chan * 0x20 + 0x08);
a3 = inb(dev->iobase + chan * 0x20 + 0x0c);
a4 = inb(dev->iobase + chan * 0x20 + 0x10);
result = (a1 + (a2 * 256) + (a3 * 65536));
if (a4 > 0)
result = result - s->maxdata;
*data = (unsigned int)result;
/* return the number of samples read */
return 1;
}
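/*
 * Editorial note on the register layout used above: each counter channel
 * occupies a 0x20-byte block of I/O space starting at dev->iobase. The byte
 * lanes of the 24-bit count sit at offsets +0x04 (LSB), +0x08 and +0x0c (MSB),
 * with a sign/overflow indicator byte at +0x10; offset +0x00 is read first
 * (a0 above) but its value is not used in the assembled result.
 */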
static int cnt_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct comedi_subdevice *s;
int ret;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 0);
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
s = &dev->subdevices[0];
dev->read_subdev = s;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE /* | SDF_COMMON */ ;
s->n_chan = 3;
s->maxdata = 0x00ffffff;
s->insn_read = cnt_rinsn;
s->insn_write = cnt_winsn;
/* select 20MHz clock */
outb(3, dev->iobase + 248);
/* reset all counters */
outb(0, dev->iobase);
outb(0, dev->iobase + 0x20);
outb(0, dev->iobase + 0x40);
dev_info(dev->class_dev, "%s: %s attached\n",
dev->driver->driver_name, dev->board_name);
return 0;
}
static struct comedi_driver ke_counter_driver = {
.driver_name = "ke_counter",
.module = THIS_MODULE,
.auto_attach = cnt_auto_attach,
.detach = comedi_pci_disable,
};
static int ke_counter_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &ke_counter_driver,
id->driver_data);
}
static const struct pci_device_id ke_counter_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ke_counter_pci_table);
static struct pci_driver ke_counter_pci_driver = {
.name = "ke_counter",
.id_table = ke_counter_pci_table,
.probe = ke_counter_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ke_counter_driver, ke_counter_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
houzhenggang/bcm63xx-next | drivers/staging/rtl8188eu/os_dep/xmit_linux.c | 249 | 7304 | /******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _XMIT_OSDEP_C_
#include <linux/version.h>
#include <osdep_service.h>
#include <drv_types.h>
#include <if_ether.h>
#include <ip.h>
#include <wifi.h>
#include <mlme_osdep.h>
#include <xmit_osdep.h>
#include <osdep_intf.h>
#include <usb_osintf.h>
uint rtw_remainder_len(struct pkt_file *pfile)
{
return pfile->buf_len - ((size_t)(pfile->cur_addr) -
(size_t)(pfile->buf_start));
}
void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
{
_func_enter_;
pfile->pkt = pktptr;
pfile->cur_addr = pktptr->data;
pfile->buf_start = pktptr->data;
pfile->pkt_len = pktptr->len;
pfile->buf_len = pktptr->len;
pfile->cur_buffer = pfile->buf_start;
_func_exit_;
}
uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
{
uint len = 0;
_func_enter_;
len = rtw_remainder_len(pfile);
len = (rlen > len) ? len : rlen;
if (rmem)
skb_copy_bits(pfile->pkt, pfile->buf_len-pfile->pkt_len, rmem, len);
pfile->cur_addr += len;
pfile->pkt_len -= len;
_func_exit_;
return len;
}
int rtw_endofpktfile(struct pkt_file *pfile)
{
_func_enter_;
if (pfile->pkt_len == 0) {
_func_exit_;
return true;
}
_func_exit_;
return false;
}
void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib)
{
}
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
int i;
pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
if (pxmitbuf->pallocated_buf == NULL)
return _FAIL;
pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
pxmitbuf->dma_transfer_addr = 0;
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
if (pxmitbuf->pxmit_urb[i] == NULL) {
DBG_88E("pxmitbuf->pxmit_urb[i]==NULL");
return _FAIL;
}
}
return _SUCCESS;
}
void rtw_os_xmit_resource_free(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 free_sz)
{
int i;
for (i = 0; i < 8; i++)
usb_free_urb(pxmitbuf->pxmit_urb[i]);
kfree(pxmitbuf->pallocated_buf);
}
#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
u16 queue;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
queue = skb_get_queue_mapping(pkt);
if (padapter->registrypriv.wifi_spec) {
if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
(pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
netif_wake_subqueue(padapter->pnetdev, queue);
} else {
if (__netif_subqueue_stopped(padapter->pnetdev, queue))
netif_wake_subqueue(padapter->pnetdev, queue);
}
#else
if (netif_queue_stopped(padapter->pnetdev))
netif_wake_queue(padapter->pnetdev);
#endif
dev_kfree_skb_any(pkt);
}
void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
{
if (pxframe->pkt)
rtw_os_pkt_complete(padapter, pxframe->pkt);
pxframe->pkt = NULL;
}
void rtw_os_xmit_schedule(struct adapter *padapter)
{
struct xmit_priv *pxmitpriv;
if (!padapter)
return;
pxmitpriv = &padapter->xmitpriv;
spin_lock_bh(&pxmitpriv->lock);
if (rtw_txframes_pending(padapter))
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
spin_unlock_bh(&pxmitpriv->lock);
}
static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
{
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u16 queue;
queue = skb_get_queue_mapping(pkt);
if (padapter->registrypriv.wifi_spec) {
/* No free space for Tx, tx_worker is too slow */
if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
netif_stop_subqueue(padapter->pnetdev, queue);
} else {
if (pxmitpriv->free_xmitframe_cnt <= 4) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
netif_stop_subqueue(padapter->pnetdev, queue);
}
}
}
static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
{
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct list_head *phead, *plist;
struct sk_buff *newskb;
struct sta_info *psta = NULL;
s32 res;
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
plist = get_next(phead);
/* free sta asoc_queue */
while (!rtw_end_of_queue_search(phead, plist)) {
psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
plist = get_next(plist);
		/* avoid sending a frame that came from STA1 back to STA1 */
if (!memcmp(psta->hwaddr, &skb->data[6], 6))
continue;
newskb = skb_copy(skb, GFP_ATOMIC);
if (newskb) {
memcpy(newskb->data, psta->hwaddr, 6);
res = rtw_xmit(padapter, &newskb);
if (res < 0) {
DBG_88E("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
pxmitpriv->tx_drop++;
dev_kfree_skb_any(newskb);
} else {
pxmitpriv->tx_pkts++;
}
} else {
DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
pxmitpriv->tx_drop++;
spin_unlock_bh(&pstapriv->asoc_list_lock);
			return false;	/* Caller shall tx this multicast frame via the normal path. */
}
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
dev_kfree_skb_any(skb);
return true;
}
int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
_func_enter_;
	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_entry\n"));
if (rtw_if_up(padapter) == false) {
RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit_entry: rtw_if_up fail\n"));
goto drop_packet;
}
rtw_check_xmit_resource(padapter, pkt);
if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
(IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
(padapter->registrypriv.wifi_spec == 0)) {
if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME/4)) {
res = rtw_mlcst2unicst(padapter, pkt);
if (res)
goto exit;
}
}
res = rtw_xmit(padapter, &pkt);
if (res < 0)
goto drop_packet;
pxmitpriv->tx_pkts++;
RT_TRACE(_module_xmit_osdep_c_, _drv_info_, ("rtw_xmit_entry: tx_pkts=%d\n", (u32)pxmitpriv->tx_pkts));
goto exit;
drop_packet:
pxmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop=%d\n", (u32)pxmitpriv->tx_drop));
exit:
_func_exit_;
return 0;
}
| gpl-2.0 |
mdeejay/picasso-hc-kernel | drivers/gpu/drm/drm_pci.c | 249 | 7341 | /* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "drmP.h"
/**********************************************************************/
/** \name PCI memory */
/*@{*/
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
* Return NULL here for now to make sure nobody tries for larger alignment
*/
if (align > size)
return NULL;
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
memset(dmah->vaddr, 0, size);
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
#if 1
unsigned long addr;
size_t sz;
#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
/**
* \brief Free a PCI consistent memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
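/*
 * Illustrative pairing (editorial sketch, not part of the original file):
 * a caller typically matches drm_pci_alloc() with drm_pci_free(), keeping the
 * alignment no larger than the size as required above, e.g.
 *
 *	drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (dmah) {
 *		// use dmah->vaddr (kernel mapping) and dmah->busaddr (bus/DMA address)
 *		drm_pci_free(dev, dmah);
 *	}
 */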
#ifdef CONFIG_PCI
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
 * Attempt to get inter-module "drm" information. If we are first
* then register the character device and inter module information.
* Try and register, if we fail to register, backout previous work.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret)
goto err_g1;
pci_set_master(pdev);
dev->pdev = pdev;
dev->dev = &pdev->dev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
mutex_lock(&drm_global_mutex);
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pci_set_drvdata(pdev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g2;
}
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
if (dev->driver->load) {
ret = dev->driver->load(dev, ent->driver_data);
if (ret)
goto err_g4;
}
/* setup the grouping for the legacy output */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g4;
}
list_add_tail(&dev->driver_item, &driver->device_list);
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
drm_put_minor(&dev->primary);
err_g3:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g2:
pci_disable_device(pdev);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
/**
 * PCI device initialization. Called via drm_init at module load time.
*
* \return zero on success or a negative number on failure.
*
 * Initializes a drm_device structure, registering the
* stubs and initializing the AGP device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_pci_init(struct drm_driver *driver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(&driver->pci_driver);
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = &driver->pci_driver.id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
* function that pci_get_subsys and pci_get_class used, we'd
* be able to just pass pid in instead of doing a two-stage
* thing.
*/
pdev = NULL;
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev)) != NULL) {
if ((pdev->class & pid->class_mask) != pid->class)
continue;
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
drm_get_pci_dev(pdev, pid, driver);
}
}
return 0;
}
#else
int drm_pci_init(struct drm_driver *driver)
{
return -1;
}
#endif
/*@}*/
| gpl-2.0 |
kibuuka/dv80_2.6_emo | drivers/staging/wlags49_h2/wl_netdev.c | 761 | 61568 | /*******************************************************************************
* Agere Systems Inc.
* Wireless device driver for Linux (wlags49).
*
* Copyright (c) 1998-2003 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
* Initially developed by TriplePoint, Inc.
* http://www.triplepoint.com
*
*------------------------------------------------------------------------------
*
* This file contains handler functions registered with the net_device
* structure.
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
******************************************************************************/
/*******************************************************************************
* include files
******************************************************************************/
#include <wl_version.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
// #include <linux/sched.h>
// #include <linux/ptrace.h>
// #include <linux/slab.h>
// #include <linux/ctype.h>
// #include <linux/string.h>
//#include <linux/timer.h>
// #include <linux/interrupt.h>
// #include <linux/in.h>
// #include <linux/delay.h>
// #include <linux/skbuff.h>
// #include <asm/io.h>
// #include <asm/system.h>
// #include <asm/bitops.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
// #include <linux/skbuff.h>
// #include <linux/if_arp.h>
// #include <linux/ioport.h>
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
// #include <hcfdef.h>
#include <wl_if.h>
#include <wl_internal.h>
#include <wl_util.h>
#include <wl_priv.h>
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_wext.h>
#ifdef USE_PROFILE
#include <wl_profile.h>
#endif /* USE_PROFILE */
#ifdef BUS_PCMCIA
#include <wl_cs.h>
#endif /* BUS_PCMCIA */
#ifdef BUS_PCI
#include <wl_pci.h>
#endif /* BUS_PCI */
/*******************************************************************************
* global variables
******************************************************************************/
#if DBG
extern dbg_info_t *DbgInfo;
#endif /* DBG */
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
#else
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN)
#endif
//static int mtu = MTU_MAX;
//MODULE_PARM(mtu, "i");
//MODULE_PARM_DESC(mtu, "MTU");
/*******************************************************************************
* macros
******************************************************************************/
#define BLOCK_INPUT(buf, len) \
desc->buf_addr = buf; \
desc->BUF_SIZE = len; \
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
#define BLOCK_INPUT_DMA(buf, len) memcpy( buf, desc_next->buf_addr, pktlen )
/*******************************************************************************
* function prototypes
******************************************************************************/
/*******************************************************************************
* wl_init()
*******************************************************************************
*
* DESCRIPTION:
*
* We never need to do anything when a "Wireless" device is "initialized"
* by the net software, because we only register already-found cards.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
int wl_init( struct net_device *dev )
{
// unsigned long flags;
// struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_init" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
/* Nothing to do, but grab the spinlock anyway just in case we ever need
this routine */
// wl_lock( lp, &flags );
// wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return 0;
} // wl_init
/*============================================================================*/
/*******************************************************************************
* wl_config()
*******************************************************************************
*
* DESCRIPTION:
*
* Implement the SIOCSIFMAP interface.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
* map - a pointer to the device's ifmap structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_config( struct net_device *dev, struct ifmap *map )
{
DBG_FUNC( "wl_config" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "map", "0x%p", map );
    /* The only thing we care about here is a port change. Since this is not needed,
ignore the request. */
DBG_TRACE( DbgInfo, "%s: %s called.\n", dev->name, __FUNC__ );
DBG_LEAVE( DbgInfo );
return 0;
} // wl_config
/*============================================================================*/
/*******************************************************************************
* wl_stats()
*******************************************************************************
*
* DESCRIPTION:
*
* Return the current device statistics.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* a pointer to a net_device_stats structure containing the network
* statistics.
*
******************************************************************************/
struct net_device_stats *wl_stats( struct net_device *dev )
{
#ifdef USE_WDS
int count;
#endif /* USE_WDS */
unsigned long flags;
struct net_device_stats *pStats;
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
//DBG_FUNC( "wl_stats" );
//DBG_ENTER( DbgInfo );
//DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
pStats = NULL;
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
//DBG_LEAVE( DbgInfo );
return NULL;
}
#endif /* USE_RTS */
/* Return the statistics for the appropriate device */
#ifdef USE_WDS
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( dev == lp->wds_port[count].dev ) {
pStats = &( lp->wds_port[count].stats );
}
}
#endif /* USE_WDS */
/* If pStats is still NULL, then the device is not a WDS port */
if( pStats == NULL ) {
pStats = &( lp->stats );
}
wl_unlock( lp, &flags );
//DBG_LEAVE( DbgInfo );
return pStats;
} // wl_stats
/*============================================================================*/
/*******************************************************************************
* wl_open()
*******************************************************************************
*
* DESCRIPTION:
*
* Open the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_open(struct net_device *dev)
{
int status = HCF_SUCCESS;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_open" );
DBG_ENTER( DbgInfo );
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
#ifdef USE_PROFILE
parse_config( dev );
#endif
if( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
DBG_TRACE( DbgInfo, "Enabling Port 0\n" );
status = wl_enable( lp );
if( status != HCF_SUCCESS ) {
DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", status );
}
}
// Holding the lock too long, make a gap to allow other processes
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if ( strlen( lp->fw_image_filename ) ) {
DBG_TRACE( DbgInfo, ";???? Kludgy way to force a download\n" );
status = wl_go( lp );
} else {
status = wl_apply( lp );
}
// Holding the lock too long, make a gap to allow other processes
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if( status != HCF_SUCCESS ) {
// Unsuccessful, try reset of the card to recover
status = wl_reset( dev );
}
// Holding the lock too long, make a gap to allow other processes
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if( status == HCF_SUCCESS ) {
netif_carrier_on( dev );
WL_WDS_NETIF_CARRIER_ON( lp );
lp->is_handling_int = WL_HANDLING_INT; // Start handling interrupts
wl_act_int_on( lp );
netif_start_queue( dev );
WL_WDS_NETIF_START_QUEUE( lp );
} else {
wl_hcf_error( dev, status ); /* Report the error */
netif_device_detach( dev ); /* Stop the device and queue */
}
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return status;
} // wl_open
/*============================================================================*/
/*******************************************************************************
* wl_close()
*******************************************************************************
*
* DESCRIPTION:
*
* Close the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_close( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_close");
DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
/* Mark the adapter as busy */
netif_stop_queue( dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
netif_carrier_off( dev );
WL_WDS_NETIF_CARRIER_OFF( lp );
/* Shutdown the adapter:
Disable adapter interrupts
Stop Tx/Rx
Update statistics
Set low power mode
*/
wl_lock( lp, &flags );
wl_act_int_off( lp );
lp->is_handling_int = WL_NOT_HANDLING_INT; // Stop handling interrupts
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
/* Disable the ports */
wl_disable( lp );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return 0;
} // wl_close
/*============================================================================*/
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
strncpy(info->version, DRV_VERSION_STR, sizeof(info->version) - 1);
// strncpy(info.fw_version, priv->fw_name,
// sizeof(info.fw_version) - 1);
if (dev->dev.parent) {
dev_set_name(dev->dev.parent, "%s", info->bus_info);
//strncpy(info->bus_info, dev->dev.parent->bus_id,
// sizeof(info->bus_info) - 1);
} else {
snprintf(info->bus_info, sizeof(info->bus_info) - 1,
"PCMCIA FIXME");
// "PCMCIA 0x%lx", priv->hw.iobase);
}
} // wl_get_drvinfo
static struct ethtool_ops wl_ethtool_ops = {
.get_drvinfo = wl_get_drvinfo,
.get_link = ethtool_op_get_link,
};
/*******************************************************************************
* wl_ioctl()
*******************************************************************************
*
* DESCRIPTION:
*
* The IOCTL handler for the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
* rq - a pointer to the IOCTL request buffer.
* cmd - the IOCTL command code.
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_ioctl" );
DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
wl_lock( lp, &flags );
wl_act_int_off( lp );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
/* Handle any RTS IOCTL here */
if( cmd == WL_IOCTL_RTS ) {
DBG_TRACE( DbgInfo, "IOCTL: WL_IOCTL_RTS\n" );
ret = wvlan_rts( (struct rtsreq *)rq, dev->base_addr );
} else {
DBG_TRACE( DbgInfo, "IOCTL not supported in RTS mode: 0x%X\n", cmd );
ret = -EOPNOTSUPP;
}
goto out_act_int_on_unlock;
}
#endif /* USE_RTS */
/* Only handle UIL IOCTL requests when the UIL has the system blocked. */
if( !(( lp->flags & WVLAN2_UIL_BUSY ) && ( cmd != WVLAN2_IOCTL_UIL ))) {
#ifdef USE_UIL
struct uilreq *urq = (struct uilreq *)rq;
#endif /* USE_UIL */
switch( cmd ) {
// ================== Private IOCTLs (up to 16) ==================
#ifdef USE_UIL
case WVLAN2_IOCTL_UIL:
DBG_TRACE( DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n" );
ret = wvlan_uil( urq, lp );
break;
#endif /* USE_UIL */
default:
DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n", cmd );
ret = -EOPNOTSUPP;
break;
}
} else {
DBG_WARNING( DbgInfo, "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n" );
ret = -EBUSY;
}
#ifdef USE_RTS
out_act_int_on_unlock:
#endif /* USE_RTS */
wl_act_int_on( lp );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return ret;
} // wl_ioctl
/*============================================================================*/
#ifdef CONFIG_NET_POLL_CONTROLLER
void wl_poll(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
struct pt_regs regs;
wl_lock( lp, &flags );
    wl_isr(dev->irq, dev, &regs);
wl_unlock( lp, &flags );
}
#endif
/*******************************************************************************
* wl_tx_timeout()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler called when, for some reason, a Tx request is not completed.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_tx_timeout( struct net_device *dev )
{
#ifdef USE_WDS
int count;
#endif /* USE_WDS */
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct net_device_stats *pStats = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_tx_timeout" );
DBG_ENTER( DbgInfo );
DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
/* Figure out which device (the "root" device or WDS port) this timeout
is for */
#ifdef USE_WDS
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( dev == lp->wds_port[count].dev ) {
pStats = &( lp->wds_port[count].stats );
/* Break the loop so that we can use the counter to access WDS
information in the private structure */
break;
}
}
#endif /* USE_WDS */
/* If pStats is still NULL, then the device is not a WDS port */
if( pStats == NULL ) {
pStats = &( lp->stats );
}
/* Accumulate the timeout error */
pStats->tx_errors++;
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return;
} // wl_tx_timeout
/*============================================================================*/
/*******************************************************************************
* wl_send()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
int wl_send( struct wl_private *lp )
{
int status;
DESC_STRCT *desc;
WVLAN_LFRAME *txF = NULL;
struct list_head *element;
int len;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_send" );
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
}
if( lp->dev == NULL ) {
DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
return FALSE;
}
/* Check for the availability of FIDs; if none are available, don't take any
frames off the txQ */
if( lp->hcfCtx.IFB_RscInd == 0 ) {
return FALSE;
}
/* Reclaim the TxQ Elements and place them back on the free queue */
if( !list_empty( &( lp->txQ[0] ))) {
element = lp->txQ[0].next;
txF = (WVLAN_LFRAME * )list_entry( element, WVLAN_LFRAME, node );
if( txF != NULL ) {
lp->txF.skb = txF->frame.skb;
lp->txF.port = txF->frame.port;
txF->frame.skb = NULL;
txF->frame.port = 0;
list_del( &( txF->node ));
list_add( element, &( lp->txFree ));
lp->txQ_count--;
if( lp->txQ_count < TX_Q_LOW_WATER_MARK ) {
if( lp->netif_queue_on == FALSE ) {
DBG_TX( DbgInfo, "Kickstarting Q: %d\n", lp->txQ_count );
netif_wake_queue( lp->dev );
WL_WDS_NETIF_WAKE_QUEUE( lp );
lp->netif_queue_on = TRUE;
}
}
}
}
if( lp->txF.skb == NULL ) {
return FALSE;
}
/* If the device has resources (FIDs) available, then Tx the packet */
/* Format the TxRequest and send it to the adapter */
len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
desc = &( lp->desc_tx );
desc->buf_addr = lp->txF.skb->data;
desc->BUF_CNT = len;
desc->next_desc_addr = NULL;
status = hcf_send_msg( &( lp->hcfCtx ), desc, lp->txF.port );
if( status == HCF_SUCCESS ) {
lp->dev->trans_start = jiffies;
DBG_TX( DbgInfo, "Transmit...\n" );
if( lp->txF.port == HCF_PORT_0 ) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += lp->txF.skb->len;
}
#ifdef USE_WDS
else
{
lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_packets++;
lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_bytes += lp->txF.skb->len;
}
#endif /* USE_WDS */
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( lp->txF.skb );
lp->txF.skb = NULL;
lp->txF.port = 0;
}
return TRUE;
} // wl_send
/*============================================================================*/
/*******************************************************************************
* wl_tx()
*******************************************************************************
*
* DESCRIPTION:
*
* The Tx handler function for the network layer.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff structure containing the data to transfer.
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
{
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
WVLAN_LFRAME *txF = NULL;
struct list_head *element;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_tx" );
/* Grab the spinlock */
wl_lock( lp, &flags );
if( lp->flags & WVLAN2_UIL_BUSY ) {
DBG_WARNING( DbgInfo, "UIL has device blocked\n" );
/* Start dropping packets here??? */
wl_unlock( lp, &flags );
return 1;
}
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: we're getting a Tx...\n" );
wl_unlock( lp, &flags );
return 1;
}
#endif /* USE_RTS */
if( !lp->use_dma ) {
/* Get an element from the queue */
element = lp->txFree.next;
txF = (WVLAN_LFRAME *)list_entry( element, WVLAN_LFRAME, node );
if( txF == NULL ) {
DBG_ERROR( DbgInfo, "Problem with list_entry\n" );
wl_unlock( lp, &flags );
return 1;
}
/* Fill out the frame */
txF->frame.skb = skb;
txF->frame.port = port;
/* Move the frame to the txQ */
/* NOTE: Here's where we would do priority queueing */
list_del( &( txF->node ));
list_add( &( txF->node ), &( lp->txQ[0] ));
lp->txQ_count++;
if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES ) {
DBG_TX( DbgInfo, "Q Full: %d\n", lp->txQ_count );
if( lp->netif_queue_on == TRUE ) {
netif_stop_queue( lp->dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
lp->netif_queue_on = FALSE;
}
}
}
wl_act_int_off( lp ); /* Disable Interrupts */
/* Send the data to the hardware using the appropriate method */
#ifdef ENABLE_DMA
if( lp->use_dma ) {
wl_send_dma( lp, skb, port );
}
else
#endif
{
wl_send( lp );
}
/* Re-enable Interrupts, release the spinlock and return */
wl_act_int_on( lp );
wl_unlock( lp, &flags );
return 0;
} // wl_tx
/*============================================================================*/
/*******************************************************************************
* wl_rx()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
int wl_rx(struct net_device *dev)
{
int port;
struct sk_buff *skb;
struct wl_private *lp = wl_priv(dev);
int status;
hcf_16 pktlen;
hcf_16 hfs_stat;
DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: We're getting an Rx...\n" );
return -EIO;
}
#endif /* USE_RTS */
/* Read the HFS_STAT register from the lookahead buffer */
hfs_stat = (hcf_16)(( lp->lookAheadBuf[HFS_STAT] ) |
( lp->lookAheadBuf[HFS_STAT + 1] << 8 ));
/* Make sure the frame isn't bad */
if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS ) {
DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
lp->lookAheadBuf[HFS_STAT] );
return -EIO;
}
/* Determine what port this packet is for */
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
pktlen = lp->hcfCtx.IFB_RxLen;
if (pktlen != 0) {
skb = ALLOC_SKB(pktlen);
if (skb != NULL) {
/* Set the netdev based on the port */
switch( port ) {
#ifdef USE_WDS
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
skb->dev = lp->wds_port[port-1].dev;
break;
#endif /* USE_WDS */
case 0:
default:
skb->dev = dev;
break;
}
desc = &( lp->desc_rx );
desc->next_desc_addr = NULL;
/*
#define BLOCK_INPUT(buf, len) \
desc->buf_addr = buf; \
desc->BUF_SIZE = len; \
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
*/
GET_PACKET( skb->dev, skb, pktlen );
if( status == HCF_SUCCESS ) {
netif_rx( skb );
if( port == 0 ) {
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktlen;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_packets++;
lp->wds_port[port-1].stats.rx_bytes += pktlen;
}
#endif /* USE_WDS */
dev->last_rx = jiffies;
#ifdef WIRELESS_EXT
#ifdef WIRELESS_SPY
if( lp->spydata.spy_number > 0 ) {
char *srcaddr = skb->mac.raw + MAC_ADDR_SIZE;
wl_spy_gather( dev, srcaddr );
}
#endif /* WIRELESS_SPY */
#endif /* WIRELESS_EXT */
} else {
DBG_ERROR( DbgInfo, "Rx request to card FAILED\n" );
if( port == 0 ) {
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
dev_kfree_skb( skb );
}
} else {
DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
if( port == 0 ) {
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
}
}
}
return 0;
} // wl_rx
/*============================================================================*/
/*******************************************************************************
* wl_multicast()
*******************************************************************************
*
* DESCRIPTION:
*
* Function to handle multicast packets
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
#ifdef NEW_MULTICAST
void wl_multicast( struct net_device *dev )
{
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?should we return an error status in AP mode
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_multicast" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
if( !wl_adapter_is_open( dev )) {
DBG_LEAVE( DbgInfo );
return;
}
#if DBG
if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
DBG_PRINT(" flags: %s%s%s\n",
( dev->flags & IFF_PROMISC ) ? "Promiscous " : "",
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
netdev_for_each_mc_addr(ha, dev)
DBG_PRINT(" %s (%d)\n", DbgHwAddr(ha->addr),
dev->addr_len);
}
#endif /* DBG */
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
wl_lock( lp, &flags );
wl_act_int_off( lp );
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
if( dev->flags & IFF_PROMISC ) {
/* Enable promiscuous mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
a call to wl_apply() is needed */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
DBG_PRINT( "Enabling all multicast mode (IFF_ALLMULTI)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
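/* The LTV length is counted in 16-bit words and excludes the len field
itself: one word for the type plus three words (6 bytes) per address.
For example, four group addresses give len = (4 * 3) + 1 = 13. */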
lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
x = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
ha->addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
/* Disable promiscuous mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
DBG_PRINT( "Disabling Promiscuous mode\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
/* Disable multicast mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
DBG_PRINT( "Disabling Multicast mode\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
/* Turning on this filter will prevent all multicast frames from
being sent up from the device; however, this is a static RID,
so a call to wl_apply() is needed */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
DBG_PRINT( "Disabling all multicast mode (IFF_ALLMULTI)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
}
wl_act_int_on( lp );
wl_unlock( lp, &flags );
}
DBG_LEAVE( DbgInfo );
#endif /* HCF_STA */
} // wl_multicast
/*============================================================================*/
#else /* NEW_MULTICAST */
void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
{
DBG_FUNC( "wl_multicast");
DBG_ENTER(DbgInfo);
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
#error Obsolete set multicast interface!
DBG_LEAVE( DbgInfo );
} // wl_multicast
/*============================================================================*/
#endif /* NEW_MULTICAST */
static const struct net_device_ops wl_netdev_ops =
{
.ndo_start_xmit = &wl_tx_port0,
.ndo_set_config = &wl_config,
.ndo_get_stats = &wl_stats,
.ndo_set_multicast_list = &wl_multicast,
.ndo_init = &wl_insert,
.ndo_open = &wl_adapter_open,
.ndo_stop = &wl_adapter_close,
.ndo_do_ioctl = &wl_ioctl,
.ndo_tx_timeout = &wl_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = wl_poll,
#endif
};
/*******************************************************************************
* wl_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device and wl_private for the new adapter
* and register the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* N/A
*
* RETURNS:
*
* a pointer to an allocated and initialized net_device struct for this
* device.
*
******************************************************************************/
struct net_device * wl_device_alloc( void )
{
struct net_device *dev = NULL;
struct wl_private *lp = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_device_alloc" );
DBG_ENTER( DbgInfo );
/* Alloc a net_device struct */
dev = alloc_etherdev(sizeof(struct wl_private));
if (!dev)
return NULL;
/* Get the private adapter area that alloc_etherdev() reserved for this device */
lp = wl_priv(dev);
/* Check MTU */
if( dev->mtu > MTU_MAX )
{
DBG_WARNING( DbgInfo, "%s: MTU set too high, limiting to %d.\n",
dev->name, MTU_MAX );
dev->mtu = MTU_MAX;
}
/* Setup the function table in the device structure. */
dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
dev->netdev_ops = &wl_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &wl_ethtool_ops;
netif_stop_queue( dev );
/* Allocate virtual devices for WDS support if needed */
WL_WDS_DEVICE_ALLOC( lp );
DBG_LEAVE( DbgInfo );
return dev;
} // wl_device_alloc
/*============================================================================*/
/*******************************************************************************
* wl_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device and wl_private structures for an adapter
* and perform basic cleanup.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_device_dealloc( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev); /* needed by WL_WDS_DEVICE_DEALLOC() when USE_WDS is defined */
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_device_dealloc" );
DBG_ENTER( DbgInfo );
/* Dealloc the WDS ports */
WL_WDS_DEVICE_DEALLOC( lp );
free_netdev( dev );
DBG_LEAVE( DbgInfo );
return;
} // wl_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_tx_port0()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_0.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_0.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 0\n" );
#ifdef ENABLE_DMA
return wl_tx_dma( skb, dev, HCF_PORT_0 );
#else
return wl_tx( skb, dev, HCF_PORT_0 );
#endif
} // wl_tx_port0
/*============================================================================*/
#ifdef USE_WDS
/*******************************************************************************
* wl_tx_port1()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_1.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_1.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 1\n" );
return wl_tx( skb, dev, HCF_PORT_1 );
} // wl_tx_port1
/*============================================================================*/
/*******************************************************************************
* wl_tx_port2()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_2.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_2.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 2\n" );
return wl_tx( skb, dev, HCF_PORT_2 );
} // wl_tx_port2
/*============================================================================*/
/*******************************************************************************
* wl_tx_port3()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_3.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_3.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 3\n" );
return wl_tx( skb, dev, HCF_PORT_3 );
} // wl_tx_port3
/*============================================================================*/
/*******************************************************************************
* wl_tx_port4()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_4.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_4.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 4\n" );
return wl_tx( skb, dev, HCF_PORT_4 );
} // wl_tx_port4
/*============================================================================*/
/*******************************************************************************
* wl_tx_port5()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_5.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_5.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 5\n" );
return wl_tx( skb, dev, HCF_PORT_5 );
} // wl_tx_port5
/*============================================================================*/
/*******************************************************************************
* wl_tx_port6()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_6.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_6.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 6\n" );
return wl_tx( skb, dev, HCF_PORT_6 );
} // wl_tx_port6
/*============================================================================*/
/*******************************************************************************
* wl_wds_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device to represent the WDS ports, and register
* the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A, but will place pointers to the allocated and initialized net_device
* structs in the private adapter structure.
*
******************************************************************************/
void wl_wds_device_alloc( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_wds_device_alloc" );
DBG_ENTER( DbgInfo );
/* WDS support requires additional net_device structs to be allocated,
so that user space apps can use these virtual devices to specify the
port on which to Tx/Rx */
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
dev_wds = kzalloc( sizeof( struct net_device ), GFP_KERNEL );
ether_setup( dev_wds );
lp->wds_port[count].dev = dev_wds;
/* Re-use wl_init for all the devices, as it currently does nothing, but
is required. Re-use the stats/tx_timeout handler for all as well; the
WDS port which is requesting these operations can be determined by
the net_device pointer. Set the private member of all devices to point
to the same net_device struct; that way, all information gets
funnelled through the one "real" net_device. Name the WDS ports
"wds<n>" */
lp->wds_port[count].dev->init = &wl_init;
lp->wds_port[count].dev->get_stats = &wl_stats;
lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
lp->wds_port[count].dev->priv = lp;
sprintf( lp->wds_port[count].dev->name, "wds%d", count );
}
/* Register the Tx handlers */
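/* Each wds<n> device gets its own hard_start_xmit wrapper (wl_tx_port1
through wl_tx_port6) so the HCF port number is implied by which entry
point was called; all of them funnel into wl_tx() with the matching
HCF_PORT_n value. */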
lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
WL_WDS_NETIF_STOP_QUEUE( lp );
DBG_LEAVE( DbgInfo );
return;
} // wl_wds_device_alloc
/*============================================================================*/
/*******************************************************************************
* wl_wds_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device structures used to support WDS.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_device_dealloc( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_wds_device_dealloc" );
DBG_ENTER( DbgInfo );
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
dev_wds = lp->wds_port[count].dev;
if( dev_wds != NULL ) {
if( dev_wds->flags & IFF_UP ) {
dev_close( dev_wds );
dev_wds->flags &= ~( IFF_UP | IFF_RUNNING );
}
kfree( dev_wds );
lp->wds_port[count].dev = NULL;
}
}
DBG_LEAVE( DbgInfo );
return;
} // wl_wds_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_start_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to start the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_start_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == FALSE ) {
netif_start_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = TRUE;
}
}
}
return;
} // wl_wds_netif_start_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_stop_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to stop the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_stop_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == TRUE ) {
netif_stop_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = FALSE;
}
}
}
return;
} // wl_wds_netif_stop_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_wake_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to wake the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_wake_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == FALSE ) {
netif_wake_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = TRUE;
}
}
}
return;
} // wl_wds_netif_wake_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_on()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is present on all of the
* "virtual" network devices which repesent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_carrier_on( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered ) {
netif_carrier_on( lp->wds_port[count].dev );
}
}
}
return;
} // wl_wds_netif_carrier_on
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_off()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is NOT present on all of
* the "virtual" network devices which repesent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_carrier_off( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered ) {
netif_carrier_off( lp->wds_port[count].dev );
}
}
}
return;
} // wl_wds_netif_carrier_off
/*============================================================================*/
#endif /* USE_WDS */
#ifdef ENABLE_DMA
/*******************************************************************************
* wl_send_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits when using busmaster DMA.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
* skb - a pointer to the network layer's data buffer.
* port - the Hermes port on which to transmit.
*
* RETURNS:
*
* TRUE (nonzero) when the frame was queued to the DMA engine
* FALSE or 0 when the frame could not be sent (bad arguments or no free descriptor)
*
******************************************************************************/
int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
{
int len;
DESC_STRCT *desc = NULL;
DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_send_dma" );
if( lp == NULL )
{
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
}
if( lp->dev == NULL )
{
DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
return FALSE;
}
/* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
if( skb == NULL )
{
DBG_WARNING (DbgInfo, "Nothing to send.\n");
return FALSE;
}
len = skb->len;
/* Get a free descriptor */
desc = wl_pci_dma_get_tx_packet( lp );
if( desc == NULL )
{
if( lp->netif_queue_on == TRUE ) {
netif_stop_queue( lp->dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
lp->netif_queue_on = FALSE;
}
/* No descriptor available; drop the frame rather than dereferencing a NULL desc below */
dev_kfree_skb( skb );
return 0;
}
SET_BUF_CNT( desc, /*HCF_DMA_FD_CNT*/HFS_ADDR_DEST );
SET_BUF_SIZE( desc, HCF_DMA_TX_BUF1_SIZE );
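/* The Tx chain appears to use two descriptors: the first covers the Hermes
frame-structure header area (HFS_ADDR_DEST bytes of control and addressing
fields), and the second, desc_next, carries the Ethernet payload that is
copied in below. */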
desc_next = desc->next_desc_addr;
if( desc_next->buf_addr == NULL )
{
DBG_ERROR( DbgInfo, "DMA descriptor buf_addr is NULL\n" );
return FALSE;
}
/* Copy the payload into the DMA packet */
memcpy( desc_next->buf_addr, skb->data, len );
SET_BUF_CNT( desc_next, len );
SET_BUF_SIZE( desc_next, HCF_MAX_PACKET_SIZE );
hcf_dma_tx_put( &( lp->hcfCtx ), desc, 0 );
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( skb );
return TRUE;
} // wl_send_dma
/*============================================================================*/
/*******************************************************************************
* wl_rx_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception when using busmaster DMA.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* -EIO on error (RTS mode, empty buffer, or bad HFS status)
*
******************************************************************************/
int wl_rx_dma( struct net_device *dev )
{
int port;
hcf_16 pktlen;
hcf_16 hfs_stat;
struct sk_buff *skb;
struct wl_private *lp = NULL;
DESC_STRCT *desc, *desc_next;
//CFG_MB_INFO_RANGE2_STRCT x;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if((( lp = (struct wl_private *)dev->priv ) != NULL ) &&
!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: We're getting an Rx...\n" );
return -EIO;
}
#endif /* USE_RTS */
//if( lp->dma.status == 0 )
//{
desc = hcf_dma_rx_get( &( lp->hcfCtx ));
if( desc != NULL )
{
/* Check and see if we rcvd. a WMP frame */
/*
if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
{
DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
x.typ = CFG_MB_INFO;
x.base_typ = CFG_WMP;
x.frag_cnt = 2;
x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
}
*/
desc_next = desc->next_desc_addr;
/* Make sure the buffer isn't empty */
if( GET_BUF_CNT( desc ) == 0 ) {
DBG_WARNING( DbgInfo, "Buffer is empty!\n" );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
return -EIO;
}
/* Read the HFS_STAT register from the lookahead buffer */
hfs_stat = (hcf_16)( desc->buf_addr[HFS_STAT/2] );
/* Make sure the frame isn't bad */
if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS )
{
DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
desc->buf_addr[HFS_STAT/2] );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
return -EIO;
}
/* Determine what port this packet is for */
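/* Bits 10:8 of the HFS status word carry the MAC port the frame arrived on
(0 = primary interface, 1-6 = WDS links). */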
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
pktlen = GET_BUF_CNT(desc_next);
if (pktlen != 0) {
skb = ALLOC_SKB(pktlen);
if (skb != NULL) {
switch( port ) {
#ifdef USE_WDS
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
skb->dev = lp->wds_port[port-1].dev;
break;
#endif /* USE_WDS */
case 0:
default:
skb->dev = dev;
break;
}
GET_PACKET_DMA( skb->dev, skb, pktlen );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
netif_rx( skb );
if( port == 0 ) {
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktlen;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_packets++;
lp->wds_port[port-1].stats.rx_bytes += pktlen;
}
#endif /* USE_WDS */
dev->last_rx = jiffies;
} else {
DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
if( port == 0 )
{
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
}
}
}
//}
}
return 0;
} // wl_rx_dma
/*============================================================================*/
#endif // ENABLE_DMA
| gpl-2.0 |
thederekjay/glacier-GB-2.6.35-DARKSIDE | drivers/scsi/be2iscsi/be_mgmt.c | 761 | 11551 | /**
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
*
* Contact Information:
* linux-drivers@serverengines.com
*
* ServerEngines
* 209 N. Fair Oaks Ave
* Sunnyvale, CA 94085
*
*/
#include "be_mgmt.h"
#include "be_iscsi.h"
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_fw_cfg *req = embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
status = be_mbox_notify(ctrl);
if (!status) {
struct be_fw_cfg *pfw_cfg;
pfw_cfg = req;
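/* For embedded mailbox commands the reply is returned in the same WRB
payload, so the request buffer is simply re-read as the firmware-config
response here (pfw_cfg aliases req). */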
phba->fw_config.phys_port = pfw_cfg->phys_port;
phba->fw_config.iscsi_icd_start =
pfw_cfg->ulp[0].icd_base;
phba->fw_config.iscsi_icd_count =
pfw_cfg->ulp[0].icd_count;
phba->fw_config.iscsi_cid_start =
pfw_cfg->ulp[0].sq_base;
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
SE_DEBUG(DBG_LVL_8,
"FW reported MAX CXNS as %d \t"
"Max Supported = %d.\n",
phba->fw_config.iscsi_cid_count,
BE2_MAX_SESSIONS);
phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
}
} else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_fw_config \n");
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
struct be_dma_mem nonemb_cmd;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_mgmt_controller_attributes *req;
struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0;
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct be_mgmt_controller_attributes),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for mgmt_check_supported_fw"
"\n");
return -1;
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
memset(req, 0, sizeof(*req));
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
status = be_mbox_notify(ctrl);
if (!status) {
struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
resp->params.hba_attribs.flashrom_version_string);
SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
resp->params.hba_attribs.firmware_version_string);
SE_DEBUG(DBG_LVL_8,
"Developer Build, not performing version check...\n");
phba->fw_config.iscsi_features =
resp->params.hba_attribs.iscsi_features;
SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
phba->fw_config.iscsi_features);
} else
SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
}
unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct iscsi_cleanup_req *req = embedded_payload(wrb);
int status = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
req->chute = chute;
req->hdr_ring_id = 0;
req->data_ring_id = 0;
status = be_mcc_notify_wait(phba);
if (status)
shost_printk(KERN_WARNING, phba->shost,
" mgmt_epfw_cleanup , FAILED\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invalidate_command_table *inv_tbl,
unsigned int num_invalidate, unsigned int cid)
{
struct be_dma_mem nonemb_cmd;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_sge *sge;
struct invalidate_commands_params_in *req;
unsigned int i, tag = 0;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
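/* alloc_mcc_tag() hands out a tag identifying this asynchronous MCC command;
it is OR'd into wrb->tag0 before posting and returned to the caller, which
is expected to wait for the completion carrying the same tag before the tag
can be reused. */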
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct invalidate_commands_params_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for mgmt_invalidate_icds\n");
spin_unlock(&ctrl->mbox_lock);
return 0;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
req = nonemb_cmd.va;
memset(req, 0, sizeof(*req));
wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
sizeof(*req));
req->ref_handle = 0;
req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
for (i = 0; i < num_invalidate; i++) {
req->table[i].icd = inv_tbl->icd;
req->table[i].cid = inv_tbl->cid;
req->icd_count++;
inv_tbl++;
}
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return tag;
}
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
unsigned short cid,
unsigned short issue_reset,
unsigned short savecfg_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct iscsi_invalidate_connection_params_in *req;
unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
wrb->tag0 |= tag;
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
sizeof(*req));
req->session_handle = beiscsi_ep->fw_handle;
req->cid = cid;
if (issue_reset)
req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid, unsigned int upload_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct tcp_upload_params_in *req;
unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_in *req;
unsigned short def_hdr_id;
unsigned short def_data_id;
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
unsigned int tag = 0;
unsigned int i;
unsigned short cid = beiscsi_ep->ep_cid;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
sizeof(*req));
if (dst_addr->sa_family == PF_INET) {
__be32 s_addr = daddr_in->sin_addr.s_addr;
req->ip_address.ip_type = BE2_IPV4;
req->ip_address.ip_address[0] = s_addr & 0x000000ff;
req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
req->tcp_port = ntohs(daddr_in->sin_port);
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
beiscsi_ep->ip_type = BE2_IPV4;
} else if (dst_addr->sa_family == PF_INET6) {
req->ip_address.ip_type = BE2_IPV6;
memcpy(&req->ip_address.ip_address,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
req->tcp_port = ntohs(daddr_in6->sin6_port);
beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
memcpy(&beiscsi_ep->dst6_addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
} else{
shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
dst_addr->sa_family);
spin_unlock(&ctrl->mbox_lock);
return -EINVAL;
}
req->cid = cid;
i = phba->nxt_cqid++;
if (phba->nxt_cqid == phba->num_cpus)
phba->nxt_cqid = 0;
req->cq_id = phwi_context->be_cq[i].id;
SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id);
req->defq_id = def_hdr_id;
req->hdr_ring_id = def_hdr_id;
req->data_ring_id = def_data_id;
req->do_offload = 1;
req->dataout_template_pa.lo = ptemplate_address->lo;
req->dataout_template_pa.hi = ptemplate_address->hi;
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_addr *req;
unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
sizeof(*req));
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
| gpl-2.0 |
Racing1/android_kernel_htc_ruby_aosp | drivers/infiniband/hw/nes/nes_nic.c | 761 | 61380 | /*
* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/inet.h>
#include "nes.h"
static struct nic_qp_map nic_qp_mapping_0[] = {
{16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_1[] = {
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_2[] = {
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_3[] = {
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_4[] = {
{28,8,0,0},{32,12,0,0}
};
static struct nic_qp_map nic_qp_mapping_5[] = {
{29,9,1,0},{33,13,1,0}
};
static struct nic_qp_map nic_qp_mapping_6[] = {
{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_7[] = {
{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map *nic_qp_mapping_per_function[] = {
nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
};
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;
static int nics_per_function = 1;
/**
* nes_netdev_poll
*/
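/*
 * Standard NAPI polling: process up to 'budget' completions via
 * nes_nic_ce_handler(). If nothing remains pending, complete the NAPI
 * context and re-arm the CQ with NES_CQE_ALLOC_NOTIFY_NEXT so the next
 * completion raises an interrupt; otherwise only acknowledge the CQEs
 * consumed and stay in polling mode. The return value is the number of
 * receive packets indicated to the stack.
 */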
static int nes_netdev_poll(struct napi_struct *napi, int budget)
{
struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
nesvnic->budget = budget;
nescq->cqes_pending = 0;
nescq->rx_cqes_completed = 0;
nescq->cqe_allocs_pending = 0;
nescq->rx_pkts_indicated = 0;
nes_nic_ce_handler(nesdev, nescq);
if (nescq->cqes_pending == 0) {
napi_complete(napi);
/* clear out completed cqes and arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_read32(nesdev->regs+NES_CQE_ALLOC);
} else {
/* clear out completed cqes but don't arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC,
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
nesvnic->netdev->name);
}
return nescq->rx_pkts_indicated;
}
/**
* nes_netdev_open - Activate the network interface; ifconfig
* ethx up.
*/
static int nes_netdev_open(struct net_device *netdev)
{
u32 macaddr_low;
u16 macaddr_high;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret;
int i;
struct nes_vnic *first_nesvnic = NULL;
u32 nic_active_bit;
u32 nic_active;
struct list_head *list_pos, *list_temp;
assert(nesdev != NULL);
if (nesvnic->netdev_open == 1)
return 0;
if (netif_msg_ifup(nesvnic))
printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
ret = nes_init_nic_qp(nesdev, netdev);
if (ret) {
return ret;
}
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
nesvnic->nesibdev = nes_init_ofa_device(netdev);
if (nesvnic->nesibdev == NULL) {
printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name);
} else {
nesvnic->nesibdev->nesvnic = nesvnic;
ret = nes_register_ofa_device(nesvnic->nesibdev);
if (ret) {
printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
netdev->name, ret);
}
}
}
/* Set packet filters */
nic_active_bit = 1 << nesvnic->nic_index;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
/* Program the various MAC regs */
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
" (Addr:%08X) = %08X, HIGH = %08X.\n",
i, nesvnic->qp_nic_index[i],
NES_IDX_PERFECT_FILTER_LOW+
(nesvnic->qp_nic_index[i] * 8),
macaddr_low,
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nesvnic->nic_cq.cq_number);
nes_read32(nesdev->regs+NES_CQE_ALLOC);
list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
first_nesvnic = container_of(list_pos, struct nes_vnic, list);
if (first_nesvnic->netdev_open == 1)
break;
}
if (first_nesvnic->netdev_open == 0) {
nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index),
~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
first_nesvnic = nesvnic;
}
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
netif_start_queue(netdev);
netif_carrier_on(netdev);
}
napi_enable(&nesvnic->napi);
nesvnic->netdev_open = 1;
return 0;
}
/**
* nes_netdev_stop
*/
static int nes_netdev_stop(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 nic_active_mask;
u32 nic_active;
struct nes_vnic *first_nesvnic = NULL;
struct list_head *list_pos, *list_temp;
nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
nesvnic, nesdev, netdev, netdev->name);
if (nesvnic->netdev_open == 0)
return 0;
if (netif_msg_ifdown(nesvnic))
printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
/* Disable network packets */
napi_disable(&nesvnic->napi);
netif_stop_queue(netdev);
list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
first_nesvnic = container_of(list_pos, struct nes_vnic, list);
if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic))
break;
}
if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
(PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
(0x200*nesdev->mac_index), 0xffffffff);
nes_write_indexed(first_nesvnic->nesdev,
NES_IDX_MAC_INT_MASK+
(0x200*first_nesvnic->nesdev->mac_index),
~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
} else {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
}
nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
(nesvnic->perfect_filter_index*8), 0);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
nesvnic->nesibdev = NULL;
nesvnic->of_device_registered = 0;
}
nes_destroy_nic_qp(nesvnic);
nesvnic->netdev_open = 0;
return 0;
}
/**
* nes_nic_send
*/
static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
__le16 *wqe_fragment_length;
u32 wqe_misc;
u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
u16 skb_fragment_index;
dma_addr_t bus_address;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcph = tcp_hdr(skb);
if (skb_is_gso(skb)) {
/* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
netdev->name, skb_is_gso(skb)); */
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) |
(((u32)(((unsigned char *)tcph) - skb->data)) << 4));
} else {
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
}
} else { /* CHECKSUM_HW */
wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
skb->len);
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
skb_headlen(skb)));
wqe_fragment_length[1] = 0;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
kfree_skb(skb);
nesvnic->tx_sw_dropped++;
return NETDEV_TX_OK; /* the skb was already freed and counted as dropped, so do not ask the stack to retry */
}
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
((u64)(bus_address)));
nesnic->tx_skb[nesnic->sq_head] = skb;
}
if (skb_headlen(skb) == skb->len) {
if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
nesnic->tx_skb[nesnic->sq_head] = skb;
}
} else {
/* Deal with Fragments */
nesnic->tx_skb[nesnic->sq_head] = skb;
for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
skb_fragment_index++) {
bus_address = pci_map_page( nesdev->pcidev,
skb_shinfo(skb)->frags[skb_fragment_index].page,
skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
skb_shinfo(skb)->frags[skb_fragment_index].size,
PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
bus_address);
wqe_fragment_index++;
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
}
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size - 1;
return NETDEV_TX_OK;
}
/**
* nes_netdev_start_xmit
*/
static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
/* struct udphdr *udph; */
#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
/* 64K segment plus overflow on each side */
dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
dma_addr_t bus_address;
u32 tso_frag_index;
u32 tso_frag_count;
u32 tso_wqe_length;
u32 curr_tcp_seq;
u32 wqe_count=1;
u32 send_rc;
struct iphdr *iph;
__le16 *wqe_fragment_length;
u32 nr_frags;
u32 original_first_length;
/* u64 *wqe_fragment_address; */
/* first fragment (0) is used by copy buffer */
u16 wqe_fragment_index=1;
u16 hoffset;
u16 nhoffset;
u16 wqes_needed;
u16 wqes_available;
u32 old_head;
u32 wqe_misc;
/*
* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
* " (%u frags), tso_size=%u\n",
* netdev->name, skb->len, skb_headlen(skb),
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
if (!netif_carrier_ok(netdev))
return NETDEV_TX_OK;
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
/* Check if SQ is full */
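/*
 * sq_head and sq_tail are free-running producer/consumer indices;
 * (sq_tail - sq_head) masked by (sq_size - 1) is the free-entry count
 * modulo the ring size (0 when the ring is empty), and the +sq_size*2
 * bias only keeps the unsigned subtraction non-negative. Transmission
 * stops while exactly one free entry remains, so the producer can never
 * wrap onto the consumer and make a full ring look empty.
 */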
if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
netif_start_queue(netdev);
goto sq_no_longer_full;
}
}
nesvnic->sq_full++;
return NETDEV_TX_BUSY;
}
sq_no_longer_full:
nr_frags = skb_shinfo(skb)->nr_frags;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
nr_frags++;
}
/* Check if too many fragments */
if (unlikely((nr_frags > 4))) {
if (skb_is_gso(skb)) {
nesvnic->segmented_tso_requests++;
nesvnic->tso_requests++;
old_head = nesnic->sq_head;
/* Basically 4 fragments available per WQE with extended fragments */
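/*
 * The WQE count is therefore the ceiling of nr_frags / 4, computed as
 * (nr_frags >> 2) plus one more WQE if there is any remainder; for
 * example, 9 fragments need 3 WQEs.
 */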
wqes_needed = nr_frags >> 2;
wqes_needed += (nr_frags&3)?1:0;
wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (unlikely(wqes_needed > wqes_available)) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (wqes_needed <= wqes_available) {
netif_start_queue(netdev);
goto tso_sq_no_longer_full;
}
}
nesvnic->sq_full++;
nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
netdev->name);
return NETDEV_TX_BUSY;
}
tso_sq_no_longer_full:
/* Map all the buffers */
for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
tso_frag_count++) {
tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
skb_shinfo(skb)->frags[tso_frag_count].page,
skb_shinfo(skb)->frags[tso_frag_count].page_offset,
skb_shinfo(skb)->frags[tso_frag_count].size,
PCI_DMA_TODEVICE);
}
tso_frag_index = 0;
curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
tso_wqe_length = 0;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb) );
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* Assumes header totally fits in allocated buffer and is in first fragment */
if (original_first_length > NES_FIRST_FRAG_SIZE) {
nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
original_first_length, NES_FIRST_FRAG_SIZE);
nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
" (%u frags), tso_size=%u\n",
netdev->name,
skb->len, skb_headlen(skb),
skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
}
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
iph = (struct iphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
tcph = (struct tcphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
if ((wqe_count+1)!=(u32)wqes_needed) {
tcph->fin = 0;
tcph->psh = 0;
tcph->rst = 0;
tcph->urg = 0;
}
if (wqe_count) {
tcph->syn = 0;
}
tcph->seq = htonl(curr_tcp_seq);
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
wqe_fragment_index = 1;
if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - original_first_length);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
bus_address);
tso_wqe_length += skb_headlen(skb) -
original_first_length;
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
(u64)tso_bus_address[tso_frag_index]);
wqe_fragment_index++;
tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
if (tso_frag_index == tso_frag_count)
break;
}
if ((wqe_count+1) == (u32)wqes_needed) {
nesnic->tx_skb[nesnic->sq_head] = skb;
} else {
nesnic->tx_skb[nesnic->sq_head] = NULL;
}
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
} else {
iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
wqe_misc);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) | (((u32)hoffset) << 4));
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
tso_wqe_length + original_first_length);
curr_tcp_seq += tso_wqe_length;
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
skb_linearize(skb);
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK)
return NETDEV_TX_OK;
}
} else {
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK)
return NETDEV_TX_OK;
}
barrier();
if (wqe_count)
nes_write32(nesdev->regs+NES_WQE_ALLOC,
(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/**
* nes_netdev_get_stats
*/
static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u64 u64temp;
u32 u32temp;
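/* The per-NIC and per-MAC statistics registers appear to be clear-on-read,
 * so every value read here is accumulated into the cached software counters
 * as well as the netdev stats.  64-bit counters are assembled from separate
 * LO/HI register reads. */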
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
return &nesvnic->netstats;
}
/**
* nes_netdev_tx_timeout
*/
static void nes_netdev_tx_timeout(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (netif_msg_timer(nesvnic))
nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
}
/**
* nes_netdev_set_mac_address
*/
static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct sockaddr *mac_addr = p;
int i;
u32 macaddr_low;
u16 macaddr_high;
if (!is_valid_ether_addr(mac_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
printk(PFX "%s: Address length = %d, Address = %pM\n",
__func__, netdev->addr_len, mac_addr->sa_data);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
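/* Program the new address into every perfect filter entry owned by this
 * vnic: the low 32 bits go in the LOW register, the high 16 bits in the
 * HIGH register together with the valid bit and nic_index.  A qp_nic_index
 * entry of 0xf marks the end of the list. */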
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
return 0;
}
static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
{
u32 nic_active;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)
/**
* nes_netdev_set_multicast_list
*/
static void nes_netdev_set_multicast_list(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
u32 nic_active_bit;
u32 nic_active;
u32 perfect_filter_register_address;
u32 macaddr_low;
u16 macaddr_high;
u8 mc_all_on = 0;
u8 mc_index;
int mc_nic_index = -1;
u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
nics_per_function, 4);
u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
unsigned long flags;
int mc_count = netdev_mc_count(netdev);
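/* The first pft_entries_preallocated perfect filter entries are reserved
 * for unicast addresses (one block per adapter function, at least 4
 * entries); only the remainder can hold multicast addresses.  Promiscuous
 * mode, IFF_ALLMULTI, a nic_index above 3 or a group list that does not fit
 * falls back to the all-multicast enable bit instead. */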
spin_lock_irqsave(&nesadapter->resource_lock, flags);
nic_active_bit = 1 << nesvnic->nic_index;
if (netdev->flags & IFF_PROMISC) {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
mc_all_on = 1;
} else if ((netdev->flags & IFF_ALLMULTI) ||
(nesvnic->nic_index > 3)) {
set_allmulti(nesdev, nic_active_bit);
mc_all_on = 1;
} else {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
mc_count, !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
char *addrs;
int i;
struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
set_allmulti(nesdev, nic_active_bit);
goto unlock;
}
i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
for (i = 0, mc_index = 0; mc_index < max_pft_entries_avaiable;
mc_index++) {
while (i < mc_count && nesvnic->mcrq_mcast_filter &&
((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
get_addr(addrs, i++))) == 0));
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
while (nesadapter->pft_mcast_map[mc_index] < 16 &&
nesadapter->pft_mcast_map[mc_index] !=
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
nes_debug(NES_DBG_NIC_RX,
"mc_index=%d skipping nic_index=%d,\
used for=%d \n", mc_index,
nesvnic->nic_index,
nesadapter->pft_mcast_map[mc_index]);
mc_index++;
}
if (mc_index >= max_pft_entries_avaiable)
break;
if (i < mc_count) {
char *addr = get_addr(addrs, i++);
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n",
addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
macaddr_high = ((u16) addr[0]) << 8;
macaddr_high += (u16) addr[1];
macaddr_low = ((u32) addr[2]) << 24;
macaddr_low += ((u32) addr[3]) << 16;
macaddr_low += ((u32) addr[4]) << 8;
macaddr_low += (u32) addr[5];
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)(1<<mc_nic_index)) << 16)));
nesadapter->pft_mcast_map[mc_index] =
nesvnic->nic_index;
} else {
nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
perfect_filter_register_address+(mc_index * 8));
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
0);
nesadapter->pft_mcast_map[mc_index] = 255;
}
}
kfree(addrs);
/* PFT is not large enough */
if (i < mc_count)
set_allmulti(nesdev, nic_active_bit);
}
unlock:
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}
/**
* nes_netdev_change_mtu
*/
static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret = 0;
u8 jumbomode = 0;
u32 nic_active;
u32 nic_active_bit;
u32 uc_all_active;
u32 mc_all_active;
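/* A new MTU reprograms the interrupt moderation defaults (jumbo mode above
 * 1500 bytes) and, when the interface is running, restarts the NIC, saving
 * and restoring this vnic's all-multicast/all-unicast enable bits across
 * the stop/open cycle. */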
if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
return -EINVAL;
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
if (netdev->mtu > 1500) {
jumbomode=1;
}
nes_nic_init_timer_defaults(nesdev, jumbomode);
if (netif_running(netdev)) {
nic_active_bit = 1 << nesvnic->nic_index;
mc_all_active = nes_read_indexed(nesdev,
NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
uc_all_active = nes_read_indexed(nesdev,
NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
nes_netdev_stop(netdev);
nes_netdev_open(netdev);
nic_active = nes_read_indexed(nesdev,
NES_IDX_NIC_MULTICAST_ALL);
nic_active |= mc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= uc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
return ret;
}
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
"T/GSO Requests",
"Pause Frames Sent",
"Pause Frames Received",
"Internal Routing Errors",
"SQ SW Dropped SKBs",
"SQ Full",
"Segmented TSO Requests",
"Rx Symbol Errors",
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
"Rx Length Errors",
"Rx CRC Errors",
"Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
"Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
"CM Connects",
"CM Accepts",
"Disconnects",
"Connected Events",
"Connect Requests",
"CM Rejects",
"ModifyQP Timeouts",
"CreateQPs",
"SW DestroyQPs",
"DestroyQPs",
"CM Closes",
"CM Packets Sent",
"CM Packets Bounced",
"CM Packets Created",
"CM Packets Rcvd",
"CM Packets Dropped",
"CM Packets Retrans",
"CM Listens Created",
"CM Listens Destroyed",
"CM Backlog Drops",
"CM Loopbacks",
"CM Nodes Created",
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
"Free 4Kpbls",
"Free 256pbls",
"Timer Inits",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
};
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
/**
* nes_netdev_get_rx_csum
*/
static u32 nes_netdev_get_rx_csum (struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (nesvnic->rx_checksum_disabled)
return 0;
else
return 1;
}
/**
* nes_netdev_set_rx_csum
*/
static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (enable)
nesvnic->rx_checksum_disabled = 0;
else
nesvnic->rx_checksum_disabled = 1;
return 0;
}
/**
* nes_netdev_get_sset_count
*/
static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
{
if (stringset == ETH_SS_STATS)
return NES_ETHTOOL_STAT_COUNT;
else
return -EINVAL;
}
/**
* nes_netdev_get_strings
*/
static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
u8 *ethtool_strings)
{
if (stringset == ETH_SS_STATS)
memcpy(ethtool_strings,
&nes_ethtool_stringset,
sizeof(nes_ethtool_stringset));
}
/**
* nes_netdev_get_ethtool_stats
*/
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
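/* Values must be written in exactly the same order as nes_ethtool_stringset;
 * index is pre-incremented for every stat after the first.  The MAC/port
 * counters are re-read (and accumulated) here just as in
 * nes_netdev_get_stats. */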
target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
target_stat_values[++index] = nesvnic->linearized_skbs;
target_stat_values[++index] = nesvnic->tso_requests;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_sent += u32temp;
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_received += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_rx_discards += u32temp;
nesvnic->netstats.rx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
(nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
}
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
target_stat_values[++index] = nesvnic->tx_sw_dropped;
target_stat_values[++index] = nesvnic->sq_full;
target_stat_values[++index] = nesvnic->segmented_tso_requests;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
target_stat_values[++index] = atomic_read(&cm_connects);
target_stat_values[++index] = atomic_read(&cm_accepts);
target_stat_values[++index] = atomic_read(&cm_disconnects);
target_stat_values[++index] = atomic_read(&cm_connecteds);
target_stat_values[++index] = atomic_read(&cm_connect_reqs);
target_stat_values[++index] = atomic_read(&cm_rejects);
target_stat_values[++index] = atomic_read(&mod_qp_timouts);
target_stat_values[++index] = atomic_read(&qps_created);
target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
target_stat_values[++index] = atomic_read(&qps_destroyed);
target_stat_values[++index] = atomic_read(&cm_closes);
target_stat_values[++index] = cm_packets_sent;
target_stat_values[++index] = cm_packets_bounced;
target_stat_values[++index] = cm_packets_created;
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
target_stat_values[++index] = atomic_read(&cm_listens_created);
target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
}
/**
* nes_netdev_get_drvinfo
*/
static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
strcpy(drvinfo->driver, DRV_NAME);
strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
nesadapter->firmware_version & 0x000000ff);
strcpy(drvinfo->version, DRV_VERSION);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
}
/**
* nes_netdev_set_coalesce
*/
static int nes_netdev_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
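/* Only non-zero ethtool fields update the shared tuning timer; the
 * thresholds and min/max timer values are protected by periodic_timer_lock
 * because they are shared adapter-wide with the interrupt moderation
 * logic. */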
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
if (et_coalesce->rx_max_coalesced_frames_low) {
shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
}
if (et_coalesce->rx_max_coalesced_frames_irq) {
shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
}
if (et_coalesce->rx_max_coalesced_frames_high) {
shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
}
if (et_coalesce->rx_coalesce_usecs_low) {
shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
}
if (et_coalesce->rx_coalesce_usecs_high) {
shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
/* using this to drive total interrupt moderation */
nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
if (et_coalesce->use_adaptive_rx_coalesce) {
nesadapter->et_use_adaptive_rx_coalesce = 1;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
nesadapter->et_rx_coalesce_usecs_irq = 0;
if (et_coalesce->pkt_rate_low) {
nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
}
} else {
nesadapter->et_use_adaptive_rx_coalesce = 0;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
if (nesadapter->et_rx_coalesce_usecs_irq) {
nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
}
}
return 0;
}
/**
* nes_netdev_get_coalesce
*/
static int nes_netdev_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct ethtool_coalesce temp_et_coalesce;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
if (nesadapter->et_use_adaptive_rx_coalesce) {
temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
return 0;
}
/**
* nes_netdev_get_pauseparam
*/
static void nes_netdev_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
et_pauseparam->autoneg = 0;
et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
}
/**
* nes_netdev_set_pauseparam
*/
static int nes_netdev_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 u32temp;
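/* Flow control is toggled only when the requested state differs from the
 * cached disable_*_flow_control flag: TX pause sets/clears the pause-enable
 * bit in the per-MAC TX config register, RX pause clears/sets the port
 * disable-pause bit in the MPP debug register. */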
if (et_pauseparam->autoneg) {
/* TODO: should return unsupported */
return 0;
}
if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 0;
} else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 1;
}
return 0;
}
/**
* nes_netdev_get_settings
*/
static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 mac_index = nesdev->mac_index;
u8 phy_type = nesadapter->phy_type[mac_index];
u8 phy_index = nesadapter->phy_index[mac_index];
u16 phy_data;
et_cmd->duplex = DUPLEX_FULL;
et_cmd->port = PORT_MII;
et_cmd->maxtxpkt = 511;
et_cmd->maxrxpkt = 511;
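/* 1G parts report 1000 Mb/s; for external 1G PHYs the autoneg state is read
 * from PHY register 0 (bit 0x1000).  10G parts always report 10000 Mb/s
 * full duplex, with a fibre/external transceiver for ARGUS, SFP_D and KR
 * PHYs and an internal transceiver otherwise. */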
if (nesadapter->OneG_Mode) {
et_cmd->speed = SPEED_1000;
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
et_cmd->supported = SUPPORTED_1000baseT_Full;
et_cmd->advertising = ADVERTISED_1000baseT_Full;
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
else
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->phy_address = phy_index;
}
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D) ||
(phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
et_cmd->advertising = ADVERTISED_FIBRE;
et_cmd->phy_address = phy_index;
} else {
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->supported = SUPPORTED_10000baseT_Full;
et_cmd->advertising = ADVERTISED_10000baseT_Full;
et_cmd->phy_address = mac_index;
}
et_cmd->speed = SPEED_10000;
et_cmd->autoneg = AUTONEG_DISABLE;
return 0;
}
/**
* nes_netdev_set_settings
*/
static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
unsigned long flags;
u16 phy_data;
u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
/* Turn off autoneg */
phy_data &= ~0x1000;
}
nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
}
static const struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = nes_netdev_get_settings,
.set_settings = nes_netdev_set_settings,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_rx_csum = nes_netdev_get_rx_csum,
.get_sg = ethtool_op_get_sg,
.get_strings = nes_netdev_get_strings,
.get_sset_count = nes_netdev_get_sset_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
.get_drvinfo = nes_netdev_get_drvinfo,
.get_coalesce = nes_netdev_get_coalesce,
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_rx_csum = nes_netdev_set_rx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
unsigned long flags;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nesvnic->vlan_grp = grp;
nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name);
/* Enable/Disable VLAN Stripping */
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
if (grp)
u32temp &= 0xfdffffff;
else
u32temp |= 0x02000000;
nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
static const struct net_device_ops nes_netdev_ops = {
.ndo_open = nes_netdev_open,
.ndo_stop = nes_netdev_stop,
.ndo_start_xmit = nes_netdev_start_xmit,
.ndo_get_stats = nes_netdev_get_stats,
.ndo_tx_timeout = nes_netdev_tx_timeout,
.ndo_set_mac_address = nes_netdev_set_mac_address,
.ndo_set_multicast_list = nes_netdev_set_multicast_list,
.ndo_change_mtu = nes_netdev_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_register = nes_netdev_vlan_rx_register,
};
/**
* nes_netdev_init - initialize network device
*/
struct net_device *nes_netdev_init(struct nes_device *nesdev,
void __iomem *mmio_addr)
{
u64 u64temp;
struct nes_vnic *nesvnic;
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
printk(KERN_ERR PFX "nesvnic etherdev alloc failed\n");
return NULL;
}
nesvnic = netdev_priv(netdev);
nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
netdev->watchdog_timeo = NES_TX_TIMEOUT;
netdev->irq = nesdev->pcidev->irq;
netdev->mtu = ETH_DATA_LEN;
netdev->hard_header_len = ETH_HLEN;
netdev->addr_len = ETH_ALEN;
netdev->type = ARPHRD_ETHER;
netdev->features = NETIF_F_HIGHDMA;
netdev->netdev_ops = &nes_netdev_ops;
netdev->ethtool_ops = &nes_ethtool_ops;
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
/* Fill in the port structure */
nesvnic->netdev = netdev;
nesvnic->nesdev = nesdev;
nesvnic->msg_enable = netif_msg_init(debug, default_msg);
nesvnic->netdev_index = nesdev->netdev_count;
nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN;
curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
/* Setup the burned in MAC address */
u64temp = (u64)nesdev->nesadapter->mac_addr_low;
u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
u64temp += nesvnic->nic_index;
netdev->dev_addr[0] = (u8)(u64temp>>40);
netdev->dev_addr[1] = (u8)(u64temp>>32);
netdev->dev_addr[2] = (u8)(u64temp>>24);
netdev->dev_addr[3] = (u8)(u64temp>>16);
netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp;
memcpy(netdev->perm_addr, netdev->dev_addr, 6);
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
} else {
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
}
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
" nic_index = %d, logical_port = %d, mac_index = %d.\n",
nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
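/* Work out which NIC perfect-filter/statistics indices this vnic owns: a
 * single-port, single-function adapter owns four consecutive indices (or
 * two when NES_DRV_OPT_DUAL_LOGICAL_PORT is set), a two-port or
 * two-function adapter owns two with a stride of two, and anything else
 * owns only its own index.  Unused slots are marked 0xf. */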
if (nesvnic->nesdev->nesadapter->port_count == 1 &&
nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
}
} else {
if (nesvnic->nesdev->nesadapter->port_count == 2 ||
(nesvnic->nesdev->nesadapter->port_count == 1 &&
nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index
+ 2;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = 0xf;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
}
}
nesvnic->next_qp_nic_index = 0;
if (nesdev->netdev_count == 0) {
nesvnic->rdma_enabled = 1;
} else {
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
spin_lock_init(&nesvnic->tx_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
nesvnic, nesdev->mac_index);
list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
if ((nesdev->netdev_count == 0) &&
((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
((phy_type == NES_PHY_TYPE_PUMA_1G) &&
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
u32 u32temp;
u32 link_mask;
u32 link_val;
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
if (phy_type != NES_PHY_TYPE_PUMA_1G) {
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)), u32temp);
}
/* Check and set linkup here. This is for back to back */
/* configuration where second port won't get link interrupt */
switch (phy_type) {
case NES_PHY_TYPE_PUMA_1G:
if (nesdev->mac_index < 2) {
link_mask = 0x01010000;
link_val = 0x01010000;
} else {
link_mask = 0x02020000;
link_val = 0x02020000;
}
break;
default:
link_mask = 0x0f1f0000;
link_val = 0x0f0f0000;
break;
}
u32temp = nes_read_indexed(nesdev,
NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
if ((u32temp & link_mask) == link_val)
nesvnic->linkup = 1;
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
nes_init_phy(nesdev);
}
return netdev;
}
/**
* nes_netdev_destroy - destroy network device structure
*/
void nes_netdev_destroy(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
/* make sure 'stop' method is called by Linux stack */
/* nes_netdev_stop(netdev); */
list_del(&nesvnic->list);
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
}
free_netdev(netdev);
}
/**
* nes_nic_cm_xmit -- CM calls this to send out pkts
*/
int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int ret;
skb->dev = netdev;
ret = dev_queue_xmit(skb);
if (ret) {
nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
}
return ret;
}
| gpl-2.0 |
delafer/YP-GI1CW | drivers/staging/vt6655/rxtx.c | 761 | 131614 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
*
* Author: Lyndon Chen
*
* Date: May 20, 2003
*
* Functions:
* s_vGenerateTxParameter - Generate tx dma required parameter.
* vGenerateMACHeader - Translate 802.3 to 802.11 header
* cbGetFragCount - Calculate fragment number count
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_cbFillTxBufHead - fulfill tx dma buffer header
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead - fulfill tx data duration header
* s_uGetRTSCTSDuration - get rts/cts required duration
* s_uGetRTSCTSRsvTime - get rts/cts reserved time
* s_uGetTxRsvTime - get frame reserved time
* s_vFillCTSHead - fulfill CTS ctl header
* s_vFillFragParameter - Set fragment ctl parameter.
* s_vFillRTSHead - fulfill RTS ctl header
* s_vFillTxKey - fulfill tx encrypt key
* s_vSWencryption - Software encrypt header
* vDMA0_tx_80211 - tx 802.11 frame via dma0
* vGenerateFIFOHeader - Generate tx FIFO ctl header
*
* Revision History:
*
*/
#include "device.h"
#include "rxtx.h"
#include "tether.h"
#include "card.h"
#include "bssdb.h"
#include "mac.h"
#include "baseband.h"
#include "michael.h"
#include "tkip.h"
#include "tcrc.h"
#include "wctl.h"
#include "wroute.h"
#include "hostap.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;
#define PLICE_DEBUG
/*--------------------- Static Functions --------------------------*/
/*--------------------- Static Definitions -------------------------*/
#define CRITICAL_PACKET_LEN 256 // if packet size < 256 -> in-direct send
// packet size >= 256 -> direct send
const WORD wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble
};
const WORD wFB_Opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1
};
const WORD wFB_Opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0
{RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1
};
#define RTSDUR_BB 0
#define RTSDUR_BA 1
#define RTSDUR_AA 2
#define CTSDUR_BA 3
#define RTSDUR_BA_F0 4
#define RTSDUR_AA_F0 5
#define RTSDUR_BA_F1 6
#define RTSDUR_AA_F1 7
#define CTSDUR_BA_F0 8
#define CTSDUR_BA_F1 9
#define DATADUR_B 10
#define DATADUR_A 11
#define DATADUR_A_F0 12
#define DATADUR_A_F1 13
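// Index values passed to s_uGetDataDuration()/s_uGetRTSCTSDuration() to
// select which Duration/ID variant to compute (CCK vs. OFDM control
// responses, plus _F0/_F1 variants recomputed for auto-fallback rate 0/1).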
/*--------------------- Static Functions --------------------------*/
static
void
s_vFillTxKey(
PSDevice pDevice,
PBYTE pbyBuf,
PBYTE pbyIVHead,
PSKeyItem pTransmitKey,
PBYTE pbyHdrBuf,
WORD wPayloadLen,
PBYTE pMICHDR
);
static
void
s_vFillRTSHead(
PSDevice pDevice,
BYTE byPktType,
void * pvRTS,
UINT cbFrameLength,
BOOL bNeedAck,
BOOL bDisCRC,
PSEthernetHeader psEthHeader,
WORD wCurrentRate,
BYTE byFBOption
);
static
void
s_vGenerateTxParameter(
PSDevice pDevice,
BYTE byPktType,
void * pTxBufHead,
void * pvRrvTime,
void * pvRTS,
void * pvCTS,
UINT cbFrameSize,
BOOL bNeedACK,
UINT uDMAIdx,
PSEthernetHeader psEthHeader,
WORD wCurrentRate
);
static void s_vFillFragParameter(
PSDevice pDevice,
PBYTE pbyBuffer,
UINT uTxType,
void * pvtdCurr,
WORD wFragType,
UINT cbReqCount
);
static
UINT
s_cbFillTxBufHead (
PSDevice pDevice,
BYTE byPktType,
PBYTE pbyTxBufferAddr,
UINT cbFrameBodySize,
UINT uDMAIdx,
PSTxDesc pHeadTD,
PSEthernetHeader psEthHeader,
PBYTE pPacket,
BOOL bNeedEncrypt,
PSKeyItem pTransmitKey,
UINT uNodeIndex,
PUINT puMACfragNum
);
static
UINT
s_uFillDataHead (
PSDevice pDevice,
BYTE byPktType,
void * pTxDataHead,
UINT cbFrameLength,
UINT uDMAIdx,
BOOL bNeedAck,
UINT uFragIdx,
UINT cbLastFragmentSize,
UINT uMACfragNum,
BYTE byFBOption,
WORD wCurrentRate
);
/*--------------------- Export Variables --------------------------*/
static
void
s_vFillTxKey (
PSDevice pDevice,
PBYTE pbyBuf,
PBYTE pbyIVHead,
PSKeyItem pTransmitKey,
PBYTE pbyHdrBuf,
WORD wPayloadLen,
PBYTE pMICHDR
)
{
PDWORD pdwIV = (PDWORD) pbyIVHead;
PDWORD pdwExtIV = (PDWORD) ((PBYTE)pbyIVHead+4);
WORD wValue;
PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
DWORD dwRevIVCounter;
BYTE byKeyIndex = 0;
//Fill TXKEY
if (pTransmitKey == NULL)
return;
dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
*pdwIV = pDevice->dwIVCounter;
byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN ){
memcpy(pDevice->abyPRNG, (PBYTE)&(dwRevIVCounter), 3);
memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
} else {
memcpy(pbyBuf, (PBYTE)&(dwRevIVCounter), 3);
memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
if(pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
memcpy(pbyBuf+8, (PBYTE)&(dwRevIVCounter), 3);
memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
}
memcpy(pDevice->abyPRNG, pbyBuf, 16);
}
// Append IV after Mac Header
*pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
*pdwIV |= (byKeyIndex << 30);
*pdwIV = cpu_to_le32(*pdwIV);
pDevice->dwIVCounter++;
if (pDevice->dwIVCounter > WEP_IV_MASK) {
pDevice->dwIVCounter = 0;
}
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
pTransmitKey->wTSC15_0++;
if (pTransmitKey->wTSC15_0 == 0) {
pTransmitKey->dwTSC47_16++;
}
TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
memcpy(pbyBuf, pDevice->abyPRNG, 16);
// Make IV
memcpy(pdwIV, pDevice->abyPRNG, 3);
*(pbyIVHead+3) = (BYTE)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
// Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
pTransmitKey->wTSC15_0++;
if (pTransmitKey->wTSC15_0 == 0) {
pTransmitKey->dwTSC47_16++;
}
memcpy(pbyBuf, pTransmitKey->abyKey, 16);
// Make IV
*pdwIV = 0;
*(pbyIVHead+3) = (BYTE)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
*pdwIV |= cpu_to_le16((WORD)(pTransmitKey->wTSC15_0));
//Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
//Fill MICHDR0
*pMICHDR = 0x59;
*((PBYTE)(pMICHDR+1)) = 0; // TxPriority
memcpy(pMICHDR+2, &(pMACHeader->abyAddr2[0]), 6);
*((PBYTE)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
*((PBYTE)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
*((PBYTE)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
*((PBYTE)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
*((PBYTE)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
*((PBYTE)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
*((PBYTE)(pMICHDR+14)) = HIBYTE(wPayloadLen);
*((PBYTE)(pMICHDR+15)) = LOBYTE(wPayloadLen);
//Fill MICHDR1
*((PBYTE)(pMICHDR+16)) = 0; // HLEN[15:8]
if (pDevice->bLongHeader) {
*((PBYTE)(pMICHDR+17)) = 28; // HLEN[7:0]
} else {
*((PBYTE)(pMICHDR+17)) = 22; // HLEN[7:0]
}
wValue = cpu_to_le16(pMACHeader->wFrameCtl & 0xC78F);
memcpy(pMICHDR+18, (PBYTE)&wValue, 2); // MSKFRACTL
memcpy(pMICHDR+20, &(pMACHeader->abyAddr1[0]), 6);
memcpy(pMICHDR+26, &(pMACHeader->abyAddr2[0]), 6);
//Fill MICHDR2
memcpy(pMICHDR+32, &(pMACHeader->abyAddr3[0]), 6);
wValue = pMACHeader->wSeqCtl;
wValue &= 0x000F;
wValue = cpu_to_le16(wValue);
memcpy(pMICHDR+38, (PBYTE)&wValue, 2); // MSKSEQCTL
if (pDevice->bLongHeader) {
memcpy(pMICHDR+40, &(pMACHeader->abyAddr4[0]), 6);
}
}
}
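// Software encryption fallback: append the ICV (bitwise-inverted CRC-32 of
// the payload) and RC4-encrypt payload+ICV in place, using the WEP key
// schedule or the per-packet TKIP mixed key prepared in abyPRNG.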
static
void
s_vSWencryption (
PSDevice pDevice,
PSKeyItem pTransmitKey,
PBYTE pbyPayloadHead,
WORD wPayloadSize
)
{
UINT cbICVlen = 4;
DWORD dwICV = 0xFFFFFFFFL;
PDWORD pdwICV;
if (pTransmitKey == NULL)
return;
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
//=======================================================================
// Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
pdwICV = (PDWORD)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength + 3);
rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
//=======================================================================
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
//=======================================================================
//Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
pdwICV = (PDWORD)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
//=======================================================================
}
}
/*byPktType : PK_TYPE_11A 0
PK_TYPE_11B 1
PK_TYPE_11GB 2
PK_TYPE_11GA 3
*/
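// Reserved (NAV) time for one frame: the frame's own air time plus, when an
// ACK is expected, one SIFS and the ACK time at the top CCK basic rate for
// 11b packets or the top OFDM basic rate for 11g/11a packets.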
static
UINT
s_uGetTxRsvTime (
PSDevice pDevice,
BYTE byPktType,
UINT cbFrameLength,
WORD wRate,
BOOL bNeedAck
)
{
UINT uDataTime, uAckTime;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
#ifdef PLICE_DEBUG
//printk("s_uGetTxRsvTime is %d\n",uDataTime);
#endif
if (byPktType == PK_TYPE_11B) {//11b, CCK mode
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (WORD)pDevice->byTopCCKBasicRate);
} else {//11g 2.4G OFDM mode & 11a 5G OFDM mode
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (WORD)pDevice->byTopOFDMBasicRate);
}
if (bNeedAck) {
return (uDataTime + pDevice->uSIFS + uAckTime);
}
else {
return uDataTime;
}
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
UINT
s_uGetRTSCTSRsvTime (
PSDevice pDevice,
BYTE byRTSRsvType,
BYTE byPktType,
UINT cbFrameLength,
WORD wCurrentRate
)
{
UINT uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
if (byRTSRsvType == 0) { //RTSTxRrvTime_bb
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
}
else if (byRTSRsvType == 1){ //RTSTxRrvTime_ba, only in 2.4GHZ
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
}
else if (byRTSRsvType == 2) { //RTSTxRrvTime_aa
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
}
else if (byRTSRsvType == 3) { //CTSTxRrvTime_ba, only in 2.4GHZ
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uRrvTime = uCTSTime + uAckTime + uDataTime + 2*pDevice->uSIFS;
return uRrvTime;
}
//RTSRrvTime
uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
return uRrvTime;
}
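// Duration/ID value for a data frame: SIFS + ACK time for the last (or only)
// fragment when an ACK is expected, plus the next fragment's reserved time
// for first/middle fragments.  The _F0/_F1 cases clamp the rate to 18-54M
// and look up the auto-fallback rate tables instead.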
//byFreqType 0: 5GHz, 1:2.4Ghz
static
UINT
s_uGetDataDuration (
PSDevice pDevice,
BYTE byDurType,
UINT cbFrameLength,
BYTE byPktType,
WORD wRate,
BOOL bNeedAck,
UINT uFragIdx,
UINT cbLastFragmentSize,
UINT uMACfragNum,
BYTE byFBOption
)
{
BOOL bLastFrag = 0;
UINT uAckTime =0, uNextPktTime = 0;
if (uFragIdx == (uMACfragNum-1)) {
bLastFrag = 1;
}
switch (byDurType) {
case DATADUR_B: //DATADUR_B
if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return (pDevice->uSIFS + uAckTime);
} else {
return 0;
}
}
else {//First Frag or Mid Frag
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
}
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return (pDevice->uSIFS + uAckTime + uNextPktTime);
} else {
return (pDevice->uSIFS + uNextPktTime);
}
}
break;
case DATADUR_A: //DATADUR_A
if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime);
} else {
return 0;
}
}
else {//First Frag or Mid Frag
if(uFragIdx == (uMACfragNum-2)){
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
}
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime + uNextPktTime);
} else {
return (pDevice->uSIFS + uNextPktTime);
}
}
break;
case DATADUR_A_F0: //DATADUR_A_F0
if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime);
} else {
return 0;
}
}
else { //First Frag or Mid Frag
if (byFBOption == AUTO_FB_0) {
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if(uFragIdx == (uMACfragNum-2)){
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
} else { // (byFBOption == AUTO_FB_1)
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if(uFragIdx == (uMACfragNum-2)){
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
}
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime + uNextPktTime);
} else {
return (pDevice->uSIFS + uNextPktTime);
}
}
break;
case DATADUR_A_F1: //DATADUR_A_F1
if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime);
} else {
return 0;
}
}
else { //First Frag or Mid Frag
if (byFBOption == AUTO_FB_0) {
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if(uFragIdx == (uMACfragNum-2)){
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
} else { // (byFBOption == AUTO_FB_1)
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if(uFragIdx == (uMACfragNum-2)){
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
}
if(bNeedAck){
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return (pDevice->uSIFS + uAckTime + uNextPktTime);
} else {
return (pDevice->uSIFS + uNextPktTime);
}
}
break;
default:
break;
}
ASSERT(FALSE);
return 0;
}
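// RTS/CTS Duration/ID values: an RTS duration covers CTS + 2*SIFS + the
// protected frame's reserved time; a CTS-to-self duration covers SIFS + the
// reserved time.  The _F0/_F1 variants redo the calculation with
// auto-fallback rate 0 or 1 when the data rate is in the 18-54M range.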
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
UINT
s_uGetRTSCTSDuration (
PSDevice pDevice,
BYTE byDurType,
UINT cbFrameLength,
BYTE byPktType,
WORD wRate,
BOOL bNeedAck,
BYTE byFBOption
)
{
UINT uCTSTime = 0, uDurTime = 0;
switch (byDurType) {
case RTSDUR_BB: //RTSDuration_bb
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: //RTSDuration_ba
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: //RTSDuration_aa
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case CTSDUR_BA: //CTSDuration_ba
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA_F0: //RTSDuration_ba_f0
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_AA_F0: //RTSDuration_aa_f0
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_BA_F1: //RTSDuration_ba_f1
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_AA_F1: //RTSDuration_aa_f1
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
case CTSDUR_BA_F0: //CTSDuration_ba_f0
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case CTSDUR_BA_F1: //CTSDuration_ba_f1
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <=RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
default:
break;
}
return uDurTime;
}
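/*
 * Worked example (illustrative): for byDurType == RTSDUR_AA the value
 * returned is uCTSTime + 2*SIFS + s_uGetTxRsvTime(...), i.e. the RTS
 * reserves the medium for the CTS, the data frame and (if bNeedAck) the
 * ACK, each separated by a SIFS. The _F0/_F1 cases are identical except
 * that the data Tx time is computed at the wFB_Opt0/wFB_Opt1 fallback
 * rate instead of wRate.
 */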
static
UINT
s_uFillDataHead (
PSDevice pDevice,
BYTE byPktType,
void * pTxDataHead,
UINT cbFrameLength,
UINT uDMAIdx,
BOOL bNeedAck,
UINT uFragIdx,
UINT cbLastFragmentSize,
UINT uMACfragNum,
BYTE byFBOption,
WORD wCurrentRate
)
{
WORD wLen = 0x0000;
if (pTxDataHead == NULL) {
return 0;
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
pBuf->wDuration_a = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
byPktType, wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption)); //1: 2.4GHz
pBuf->wDuration_b = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
PK_TYPE_11B, pDevice->byTopCCKBasicRate,
bNeedAck, uFragIdx, cbLastFragmentSize,
uMACfragNum, byFBOption)); //1: 2.4
pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]);
return (pBuf->wDuration_a);
} else {
// Auto Fallback
PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
pBuf->wDuration_a = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_b = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_a_f0 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_a_f1 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]);
return (pBuf->wDuration_a);
} //if (byFBOption == AUTO_FB_NONE)
}
else if (byPktType == PK_TYPE_11A) {
if ((byFBOption != AUTO_FB_NONE)) {
// Auto Fallback
PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wDuration_f0 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wDuration_f1 = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return (pBuf->wDuration);
} else {
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return (pBuf->wDuration);
}
}
else {
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return (pBuf->wDuration);
}
return 0;
}
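/*
 * Summary of the variants filled above:
 *   PK_TYPE_11GB/11GA, AUTO_FB_NONE  -> PSTxDataHead_g (separate a/b fields)
 *   PK_TYPE_11GB/11GA, fallback      -> PSTxDataHead_g_FB (adds _a_f0/_a_f1 durations)
 *   PK_TYPE_11A, fallback            -> PSTxDataHead_a_FB (adds _f0/_f1 durations)
 *   PK_TYPE_11A (no fallback) or 11B -> PSTxDataHead_ab
 * In every case the primary duration written into the head is returned.
 */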
static
void
s_vFillRTSHead (
PSDevice pDevice,
BYTE byPktType,
void * pvRTS,
UINT cbFrameLength,
BOOL bNeedAck,
BOOL bDisCRC,
PSEthernetHeader psEthHeader,
WORD wCurrentRate,
BYTE byFBOption
)
{
UINT uRTSFrameLen = 20;
WORD wLen = 0x0000;
if (pvRTS == NULL)
return;
if (bDisCRC) {
// When the CRCDIS bit is set, the hardware does not generate an FCS for the RTS frame,
// so we decrease its length by 4 here.
uRTSFrameLen -= 4;
}
// Note: so far the RTS head does not appear in ATIM & Beacon DMA, so we do not need to take them into account.
// Otherwise, the code below would have to be modified for them.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_g pBuf = (PSRTS_g)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration_bb = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->wDuration_aa = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
pBuf->wDuration_ba = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
else {
PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_a), (PBYTE)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration_bb = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->wDuration_aa = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
pBuf->wDuration_ba = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
pBuf->wRTSDuration_ba_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_aa_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_ba_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_aa_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
} // if (byFBOption == AUTO_FB_NONE)
}
else if (byPktType == PK_TYPE_11A) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
else {
PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
pBuf->wRTSDuration_f0 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
pBuf->wRTSDuration_f1 = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
}
else if (byPktType == PK_TYPE_11B) {
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField), (PBYTE)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((WORD)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
}
static
void
s_vFillCTSHead (
PSDevice pDevice,
UINT uDMAIdx,
BYTE byPktType,
void * pvCTS,
UINT cbFrameLength,
BOOL bNeedAck,
BOOL bDisCRC,
WORD wCurrentRate,
BYTE byFBOption
)
{
UINT uCTSFrameLen = 14;
WORD wLen = 0x0000;
if (pvCTS == NULL) {
return;
}
if (bDisCRC) {
// When the CRCDIS bit is set, the hardware does not generate an FCS for the CTS frame,
// so we decrease its length by 4 here.
uCTSFrameLen -= 4;
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
// Auto Fall back
PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
pBuf->wDuration_ba = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
//Get CTSDuration_ba_f0
pBuf->wCTSDuration_ba_f0 = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
//Get CTSDuration_ba_f1
pBuf->wCTSDuration_ba_f1 = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
//Get CTS Frame body
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
PSCTS pBuf = (PSCTS)pvCTS;
//Get SignalField,ServiceField,Length
BBvCaculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(PWORD)&(wLen), (PBYTE)&(pBuf->byServiceField_b), (PBYTE)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get CTSDuration_ba
pBuf->wDuration_ba = (WORD)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
//Get CTS Frame body
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
}
}
}
/*+
*
* Description:
* Generate FIFO control for MAC & Baseband controller
*
* Parameters:
* In:
* pDevice - Pointer to adapter
* byPktType - Packet type (PK_TYPE_11A / PK_TYPE_11B / PK_TYPE_11GB / PK_TYPE_11GA)
* pTxBufHead - Pointer to the Tx buffer head
* pvRrvTime - Reserve-time buffer
* pvRTS - RTS Buffer
* pvCTS - CTS Buffer
* cbFrameSize - Transmit Data Length (Hdr+Payload+FCS)
* bNeedACK - If need ACK
* uDMAIdx - DMA Index
* psEthHeader - Pointer to the Ethernet header
* wCurrentRate - Current transmit rate
* Out:
* none
*
* Return Value: none
*
-*/
static
void
s_vGenerateTxParameter (
PSDevice pDevice,
BYTE byPktType,
void * pTxBufHead,
void * pvRrvTime,
void * pvRTS,
void * pvCTS,
UINT cbFrameSize,
BOOL bNeedACK,
UINT uDMAIdx,
PSEthernetHeader psEthHeader,
WORD wCurrentRate
)
{
UINT cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
WORD wFifoCtl;
BOOL bDisCRC = FALSE;
BYTE byFBOption = AUTO_FB_NONE;
// WORD wCurrentRate = pDevice->wCurrentRate;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
pFifoHead->wReserved = wCurrentRate;
wFifoCtl = pFifoHead->wFIFOCtl;
if (wFifoCtl & FIFOCTL_CRCDIS) {
bDisCRC = TRUE;
}
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
}
else if (wFifoCtl & FIFOCTL_AUTO_FB_1) {
byFBOption = AUTO_FB_1;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (pvRTS != NULL) { //RTS_need
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
pBuf->wRTSTxRrvTime_aa = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
pBuf->wRTSTxRrvTime_ba = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
pBuf->wRTSTxRrvTime_bb = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
pBuf->wTxRrvTime_a = cpu_to_le16((WORD) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
pBuf->wTxRrvTime_b = cpu_to_le16((WORD) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
}
else {//RTS_needless, PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
pBuf->wTxRrvTime_a = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
pBuf->wTxRrvTime_b = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
pBuf->wCTSTxRrvTime_ba = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
}
//Fill CTS
s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
}
}
else if (byPktType == PK_TYPE_11A) {
if (pvRTS != NULL) {//RTS_need, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wRTSTxRrvTime = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
}
else if (pvRTS == NULL) {//RTS_needless, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
}
}
}
else if (byPktType == PK_TYPE_11B) {
if ((pvRTS != NULL)) {//RTS_need, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wRTSTxRrvTime = cpu_to_le16((WORD)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
}
else { //RTS_needless, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wTxRrvTime = cpu_to_le16((WORD)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
}
}
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
}
/*
PBYTE pbyBuffer,//point to pTxBufHead
WORD wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
UINT cbFragmentSize,//Hdr+payload+FCS
*/
static
void
s_vFillFragParameter(
PSDevice pDevice,
PBYTE pbyBuffer,
UINT uTxType,
void * pvtdCurr,
WORD wFragType,
UINT cbReqCount
)
{
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vFillFragParameter...\n");
if (uTxType == TYPE_SYNCDMA) {
//PSTxSyncDesc ptdCurr = (PSTxSyncDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
PSTxSyncDesc ptdCurr = (PSTxSyncDesc)pvtdCurr;
//Set FIFOCtl & TimeStamp in TxSyncDesc
ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
}
else {
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
}
}
else {
//PSTxDesc ptdCurr = (PSTxDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
}
else {
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
}
}
pTxBufHead->wFragCtl |= (WORD)wFragType;//0x0001; //0000 0000 0000 0001
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vFillFragParameter END\n");
}
static
UINT
s_cbFillTxBufHead (
PSDevice pDevice,
BYTE byPktType,
PBYTE pbyTxBufferAddr,
UINT cbFrameBodySize,
UINT uDMAIdx,
PSTxDesc pHeadTD,
PSEthernetHeader psEthHeader,
PBYTE pPacket,
BOOL bNeedEncrypt,
PSKeyItem pTransmitKey,
UINT uNodeIndex,
PUINT puMACfragNum
)
{
UINT cbMACHdLen;
UINT cbFrameSize;
UINT cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
UINT cbFragPayloadSize;
UINT cbLastFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
UINT cbLastFragPayloadSize;
UINT uFragIdx;
PBYTE pbyPayloadHead;
PBYTE pbyIVHead;
PBYTE pbyMacHdr;
WORD wFragType; //00:Non-Frag, 01:Start, 02:Mid, 03:Last
UINT uDuration;
PBYTE pbyBuffer;
// UINT uKeyEntryIdx = NUM_KEY_ENTRY+1;
// BYTE byKeySel = 0xFF;
UINT cbIVlen = 0;
UINT cbICVlen = 0;
UINT cbMIClen = 0;
UINT cbFCSlen = 4;
UINT cb802_1_H_len = 0;
UINT uLength = 0;
UINT uTmpLen = 0;
// BYTE abyTmp[8];
// DWORD dwCRC;
UINT cbMICHDR = 0;
DWORD dwMICKey0, dwMICKey1;
DWORD dwMIC_Priority;
PDWORD pdwMIC_L;
PDWORD pdwMIC_R;
DWORD dwSafeMIC_L, dwSafeMIC_R; // Saved MIC for the case "last fragment payload" < "MIC length".
BOOL bMIC2Frag = FALSE;
UINT uMICFragLen = 0;
UINT uMACfragNum = 1;
UINT uPadding = 0;
UINT cbReqCount = 0;
BOOL bNeedACK;
BOOL bRTS;
BOOL bIsAdhoc;
PBYTE pbyType;
PSTxDesc ptdCurr;
PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
// UINT tmpDescIdx;
UINT cbHeaderLength = 0;
void * pvRrvTime;
PSMICHDRHead pMICHDR;
void * pvRTS;
void * pvCTS;
void * pvTxDataHd;
WORD wTxBufSize; // FFinfo size
UINT uTotalCopyLength = 0;
BYTE byFBOption = AUTO_FB_NONE;
BOOL bIsWEP256 = FALSE;
PSMgmtObject pMgmt = pDevice->pMgmt;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_cbFillTxBufHead...\n");
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
bNeedACK = FALSE;
}
else {
bNeedACK = TRUE;
}
bIsAdhoc = TRUE;
}
else {
// MSDUs in Infra mode always need ACK
bNeedACK = TRUE;
bIsAdhoc = FALSE;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
else
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL)) {
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
cbIVlen = 4;
cbICVlen = 4;
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
bIsWEP256 = TRUE;
}
}
if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
}
if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
}
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
// The MAC header must be zero-padded to DWORD alignment.
uPadding = 4 - (cbMACHdLen%4);
uPadding %= 4;
}
}
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
if ((bNeedACK == FALSE) ||
(cbFrameSize < pDevice->wRTSThreshold) ||
((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold))
) {
bRTS = FALSE;
}
else {
bRTS = TRUE;
psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
}
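/*
 * Example (illustrative thresholds): with wRTSThreshold = 2347 and
 * wFragmentationThreshold = 2346, a 1500-byte unicast MSDU stays well
 * below the RTS threshold, so bRTS remains FALSE. Only frames that need
 * an ACK, reach wRTSThreshold, and are not about to be fragmented below
 * it get FIFOCTL_RTS | FIFOCTL_LRETRY set here.
 */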
//
// Use for AUTO FALL BACK
//
if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
}
else if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_1) {
byFBOption = AUTO_FB_1;
}
//////////////////////////////////////////////////////
//Set RrvTime/RTS/CTS Buffer
wTxBufSize = sizeof(STxBufHead);
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == TRUE) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g) + sizeof(STxDataHead_g);
}
else { //RTS_needless
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
}
} else {
// Auto Fall Back
if (bRTS == TRUE) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB);
}
else { //RTS_needless
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB) + sizeof(STxDataHead_g_FB);
}
} // Auto Fall Back
}
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == TRUE) {
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab) + sizeof(STxDataHead_ab);
}
else { //RTS_needless, need MICHDR
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
}
} else {
// Auto Fall Back
if (bRTS == TRUE) {//RTS_need
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB);
}
else { //RTS_needless
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_a_FB);
}
} // Auto Fall Back
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
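/*
 * Layout example: for an 802.11g packet with RTS and no auto fallback
 * the Tx buffer is, in order,
 *   STxBufHead | SRrvTime_gRTS | (SMICHDRHead if CCMP) | SRTS_g | STxDataHead_g
 * and cbHeaderLength is the sum of those sizes; the 802.11 MAC header,
 * IV and payload are placed right after it (see pbyMacHdr below). The
 * other branches differ only in which reserve-time/RTS/CTS/data-head
 * structures are present.
 */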
//////////////////////////////////////////////////////////////////
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
}
else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
}
else {
dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[24]);
dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[28]);
}
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
}
///////////////////////////////////////////////////////////////////
pbyMacHdr = (PBYTE)(pbyTxBufferAddr + cbHeaderLength);
pbyPayloadHead = (PBYTE)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
pbyIVHead = (PBYTE)(pbyMacHdr + cbMACHdLen + uPadding);
if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == TRUE) && (bIsWEP256 == FALSE)) {
// Fragmentation
// FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS)
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
//FragNum = (FrameSize - (Hdr+FCS)) / (FragmentSize - (Hdr+FCS))
uMACfragNum = (WORD) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
} else {
uMACfragNum++;
}
//[Hdr+(IV)+last fragment payload+(MIC)+(ICV)+FCS]
cbLastFragmentSize = cbMACHdLen + cbLastFragPayloadSize + cbIVlen + cbICVlen + cbFCSlen;
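/*
 * Worked example (illustrative numbers): with wFragmentationThreshold =
 * 1024, cbMACHdLen = 24, cbIVlen = 8, cbICVlen = 4, cbFCSlen = 4 and
 * cbMIClen = 8, each full fragment carries
 *   cbFragPayloadSize = 1024 - 24 - 8 - 4 - 4 = 984 payload bytes.
 * A 2000-byte body plus the 8-byte MIC (2008 bytes) then gives
 *   uMACfragNum = 2008 / 984 = 2, remainder 40 -> uMACfragNum = 3,
 *   cbLastFragPayloadSize = 40 and
 *   cbLastFragmentSize = 24 + 40 + 8 + 4 + 4 = 80 bytes.
 */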
for (uFragIdx = 0; uFragIdx < uMACfragNum; uFragIdx ++) {
if (uFragIdx == 0) {
//=========================
// Start Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Start Fragmentation...\n");
wFragType = FRAGCTL_STAFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == TRUE) {
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbFragPayloadSize, (PBYTE)pMICHDR);
//Fill IV(ExtIV,RSNHDR)
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
// 802.1H
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
}
else {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
pbyType = (PBYTE) (pbyPayloadHead + 6);
memcpy(pbyType, &(psEthHeader->wType), sizeof(WORD));
cb802_1_H_len = 8;
}
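/*
 * Example: for an IP packet the EtherType (e.g. 0x0800) exceeds
 * ETH_DATA_LEN, so the 14-byte Ethernet header is replaced by an
 * LLC/SNAP header: 6 bytes of abySNAP_RFC1042 (or abySNAP_Bridgetunnel
 * for IPX / type 0xF380) followed by the original 2-byte EtherType,
 * hence cb802_1_H_len = 8.
 */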
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
//}
//cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
// pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
//pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len));
uTotalCopyLength += cbFragPayloadSize - cb802_1_H_len;
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Start MIC: %d\n", cbFragPayloadSize);
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize);
}
//---------------------------
// S/W Encryption
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (WORD)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
}
else if (uFragIdx == (uMACfragNum-1)) {
//=========================
// Last Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Last Fragmentation...\n");
//tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx];
wFragType = FRAGCTL_ENDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == TRUE) {
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbLastFragPayloadSize, (PBYTE)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbLastFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
//pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
if (bMIC2Frag == FALSE) {
memcpy((pbyBuffer + uLength),
(pPacket + 14 + uTotalCopyLength),
(cbLastFragPayloadSize - cbMIClen)
);
//TODO check uTmpLen !
uTmpLen = cbLastFragPayloadSize - cbMIClen;
}
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n",
uMICFragLen, cbLastFragPayloadSize, uTmpLen);
if (bMIC2Frag == FALSE) {
if (uTmpLen != 0)
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
pdwMIC_L = (PDWORD)(pbyBuffer + uLength + uTmpLen);
pdwMIC_R = (PDWORD)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
} else {
if (uMICFragLen >= 4) {
memcpy((pbyBuffer + uLength), ((PBYTE)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen >= 4: %X, %d\n",
*(PBYTE)((PBYTE)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
} else {
memcpy((pbyBuffer + uLength), ((PBYTE)&dwSafeMIC_L + uMICFragLen),
(4 - uMICFragLen));
memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"LAST: uMICFragLen < 4: %X, %d\n",
*(PBYTE)((PBYTE)&dwSafeMIC_R + uMICFragLen - 4),
(cbMIClen - uMICFragLen));
}
/*
for (ii = 0; ii < cbLastFragPayloadSize + 8 + 24; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii - 8 - 24)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n");
*/
}
MIC_vUnInit();
} else {
ASSERT(uTmpLen == (cbLastFragPayloadSize - cbMIClen));
}
//---------------------------
// S/W Encryption
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (WORD)cbLastFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
}
else {
//=========================
// Middle Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Middle Fragmentation...\n");
//tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx];
wFragType = FRAGCTL_MIDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == TRUE) {
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbFragPayloadSize, (PBYTE)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
//}
//cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
// pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
//pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
(pPacket + 14 + uTotalCopyLength),
cbFragPayloadSize
);
uTmpLen = cbFragPayloadSize;
uTotalCopyLength += uTmpLen;
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
if (uTmpLen < cbFragPayloadSize) {
bMIC2Frag = TRUE;
uMICFragLen = cbFragPayloadSize - uTmpLen;
ASSERT(uMICFragLen < cbMIClen);
pdwMIC_L = (PDWORD)(pbyBuffer + uLength + uTmpLen);
pdwMIC_R = (PDWORD)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
dwSafeMIC_L = *pdwMIC_L;
dwSafeMIC_R = *pdwMIC_R;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIDDLE: uMICFragLen:%d, cbFragPayloadSize:%d, uTmpLen:%d\n",
uMICFragLen, cbFragPayloadSize, uTmpLen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MIC in Middle frag [%d]\n", uMICFragLen);
/*
for (ii = 0; ii < uMICFragLen; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength + uTmpLen) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Middle frag len: %d\n", uTmpLen);
/*
for (ii = 0; ii < uTmpLen; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n\n");
*/
} else {
ASSERT(uTmpLen == (cbFragPayloadSize));
}
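/*
 * The bMIC2Frag/uMICFragLen path above appears intended for a TKIP MIC
 * that straddles two fragments: the computed MIC is saved in
 * dwSafeMIC_L/dwSafeMIC_R so that the last-fragment branch earlier in
 * this function can copy the MIC bytes that did not fit here.
 */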
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (WORD)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
}
} // for (uMACfragNum)
}
else {
//=========================
// No Fragmentation
//=========================
//DBG_PRTGRP03(("No Fragmentation...\n"));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Fragmentation...\n");
wFragType = FRAGCTL_NONFRAG;
//Set FragCtl in TxBufferHead
psTxBufHd->wFragCtl |= (WORD)wFragType;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (WORD)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, 0);
if (bNeedEncrypt == TRUE) {
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbFrameBodySize, (PBYTE)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
// 802.1H
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
}
else {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
pbyType = (PBYTE) (pbyPayloadHead + 6);
memcpy(pbyType, &(psEthHeader->wType), sizeof(WORD));
cb802_1_H_len = 8;
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen);
//---------------------------
// S/W or H/W Encryption
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Fill MICHDR...\n");
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFrameBodySize);
//}
pbyBuffer = (PBYTE)pHeadTD->pTDInfo->buf;
//pbyBuffer = (PBYTE)pDevice->aamTxBuf[uDMAIdx][uDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
(pPacket + 14),
cbFrameBodySize - cb802_1_H_len
);
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Length:%d, %d\n", cbFrameBodySize - cb802_1_H_len, uLength);
/*
for (ii = 0; ii < (cbFrameBodySize - cb802_1_H_len); ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *((PBYTE)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
pdwMIC_L = (PDWORD)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
pdwMIC_R = (PDWORD)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
if (pDevice->bTxMICFail == TRUE) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
pDevice->bTxMICFail = FALSE;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
/*
for (ii = 0; ii < 8; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(((PBYTE)(pdwMIC_L) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
*/
}
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)){
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len),
(WORD)(cbFrameBodySize + cbMIClen));
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
pDevice->iTDUsed[uDMAIdx]++;
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" ptdCurr->m_dwReserved0[%d] ptdCurr->m_dwReserved1[%d].\n", ptdCurr->pTDInfo->dwReqCount, ptdCurr->pTDInfo->dwHeaderLength);
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cbHeaderLength[%d]\n", cbHeaderLength);
}
*puMACfragNum = uMACfragNum;
//DBG_PRTGRP03(("s_cbFillTxBufHead END\n"));
return cbHeaderLength;
}
void
vGenerateFIFOHeader (
PSDevice pDevice,
BYTE byPktType,
PBYTE pbyTxBufferAddr,
BOOL bNeedEncrypt,
UINT cbPayloadSize,
UINT uDMAIdx,
PSTxDesc pHeadTD,
PSEthernetHeader psEthHeader,
PBYTE pPacket,
PSKeyItem pTransmitKey,
UINT uNodeIndex,
PUINT puMACfragNum,
PUINT pcbHeaderSize
)
{
UINT wTxBufSize; // FFinfo size
BOOL bNeedACK;
BOOL bIsAdhoc;
WORD cbMacHdLen;
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
//Set FIFOCTL_NEEDACK
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
bNeedACK = FALSE;
pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
}
else {
bNeedACK = TRUE;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
bIsAdhoc = TRUE;
}
else {
// MSDUs in Infra mode always need ACK
bNeedACK = TRUE;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
bIsAdhoc = FALSE;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
//Set FIFOCTL_LHEAD
if (pDevice->bLongHeader)
pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
//Set FIFOCTL_GENINT
pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
//Set FIFOCTL_ISDMA0
if (TYPE_TXDMA0 == uDMAIdx) {
pTxBufHead->wFIFOCtl |= FIFOCTL_ISDMA0;
}
//Set FRAGCTL_MACHDCNT
if (pDevice->bLongHeader) {
cbMacHdLen = WLAN_HDR_ADDR3_LEN + 6;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
pTxBufHead->wFragCtl |= cpu_to_le16((WORD)(cbMacHdLen << 10));
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
;
}
else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
}
else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
}
else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//Set Auto Fallback Ctl
if (pDevice->wCurrentRate >= RATE_18M) {
if (pDevice->byAutoFBCtrl == AUTO_FB_0) {
pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
} else if (pDevice->byAutoFBCtrl == AUTO_FB_1) {
pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
}
}
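/*
 * Example: a unicast MSDU sent in infrastructure mode as an 802.11g(B)
 * packet ends up with wFIFOCtl containing
 *   FIFOCTL_NEEDACK | FIFOCTL_TMOEN | FIFOCTL_GENINT | FIFOCTL_11GB,
 * plus FIFOCTL_AUTO_FB_0/_1 when auto fallback is enabled at >= RATE_18M,
 * FIFOCTL_LHEAD for long headers, and FIFOCTL_ISDMA0 on DMA0.
 */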
//Set FRAGCTL_WEPTYP
pDevice->bAES = FALSE;
//Set FRAGCTL_WEPTYP
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
if ((bNeedEncrypt) && (pTransmitKey != NULL)) { //WEP enabled
if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
}
else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104
if (pTransmitKey->uKeyLength != WLAN_WEP232_KEYLEN)
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
}
else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { //CCMP
pTxBufHead->wFragCtl |= FRAGCTL_AES;
}
}
}
#ifdef PLICE_DEBUG
//printk("Func:vGenerateFIFOHeader:TxDataRate is %d,TxPower is %d\n",pDevice->wCurrentRate,pDevice->byCurPwr);
//if (pDevice->wCurrentRate <= 3)
//{
// RFbRawSetPower(pDevice,36,pDevice->wCurrentRate);
//}
//else
RFbSetPower(pDevice, pDevice->wCurrentRate, pDevice->byCurrentCh);
#endif
//if (pDevice->wCurrentRate == 3)
//pDevice->byCurPwr = 46;
pTxBufHead->byTxPower = pDevice->byCurPwr;
/*
if(pDevice->bEnableHostWEP)
pTxBufHead->wFragCtl &= ~(FRAGCTL_TKIP | FRAGCTL_LEGACY |FRAGCTL_AES);
*/
*pcbHeaderSize = s_cbFillTxBufHead(pDevice, byPktType, pbyTxBufferAddr, cbPayloadSize,
uDMAIdx, pHeadTD, psEthHeader, pPacket, bNeedEncrypt,
pTransmitKey, uNodeIndex, puMACfragNum);
return;
}
/*+
*
* Description:
* Translate an 802.3 header into an 802.11 MAC header
*
* Parameters:
* In:
* pDevice - Pointer to adapter
* pbyBufferAddr - Pointer to the buffer where the 802.11 header is built
* wDuration - Duration value for the MAC header
* psEthHeader - Pointer to the Ethernet header
* bNeedEncrypt - Whether the frame will be encrypted (sets the WEP bit)
* wFragType - Fragment type (Non-Frag/Start/Mid/Last)
* uDMAIdx - DMA index
* uFragIdx - Fragment index (written into the sequence control field)
* Out:
* none
*
* Return Value: none
*
-*/
void
vGenerateMACHeader (
PSDevice pDevice,
PBYTE pbyBufferAddr,
WORD wDuration,
PSEthernetHeader psEthHeader,
BOOL bNeedEncrypt,
WORD wFragType,
UINT uDMAIdx,
UINT uFragIdx
)
{
PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
memset(pMACHeader, 0, (sizeof(S802_11Header))); //- sizeof(pMACHeader->dwIV)));
if (uDMAIdx == TYPE_ATIMDMA) {
pMACHeader->wFrameCtl = TYPE_802_11_ATIM;
} else {
pMACHeader->wFrameCtl = TYPE_802_11_DATA;
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_FROMDS;
}
else {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_TODS;
}
}
if (bNeedEncrypt)
pMACHeader->wFrameCtl |= cpu_to_le16((WORD)WLAN_SET_FC_ISWEP(1));
pMACHeader->wDurationID = cpu_to_le16(wDuration);
if (pDevice->bLongHeader) {
PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
pMACHeader->wFrameCtl |= (FC_TODS | FC_FROMDS);
memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
}
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
//Set FragNumber in Sequence Control
pMACHeader->wSeqCtl |= cpu_to_le16((WORD)uFragIdx);
if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) {
pDevice->wSeqCounter++;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
}
if ((wFragType == FRAGCTL_STAFRAG) || (wFragType == FRAGCTL_MIDFRAG)) { //StartFrag or MidFrag
pMACHeader->wFrameCtl |= FC_MOREFRAG;
}
}
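/*
 * Example: the sequence control field is built as (wSeqCounter << 4) |
 * uFragIdx, so MSDU number 5 split into three fragments carries wSeqCtl
 * values 0x0050, 0x0051 and 0x0052 (in host byte order); wSeqCounter
 * only advances (wrapping after 0x0fff) once the last or only fragment
 * has been generated.
 */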
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
PSTxDesc pFrstTD;
BYTE byPktType;
PBYTE pbyTxBufferAddr;
void * pvRTS;
PSCTS pCTS;
void * pvTxDataHd;
UINT uDuration;
UINT cbReqCount;
PS802_11Header pMACHeader;
UINT cbHeaderSize;
UINT cbFrameBodySize;
BOOL bNeedACK;
BOOL bIsPSPOLL = FALSE;
PSTxBufHead pTxBufHead;
UINT cbFrameSize;
UINT cbIVlen = 0;
UINT cbICVlen = 0;
UINT cbMIClen = 0;
UINT cbFCSlen = 4;
UINT uPadding = 0;
WORD wTxBufSize;
UINT cbMacHdLen;
SEthernetHeader sEthHeader;
void * pvRrvTime;
void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
WORD wCurrentRate = RATE_1M;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
return CMD_STATUS_RESOURCES;
}
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
pbyTxBufferAddr = (PBYTE)pFrstTD->pTDInfo->buf;
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_1M;
byPktType = PK_TYPE_11B;
}
// Setting power here could leave a wrong TX power state for OFDM data packets already in the TX buffer.
// 2004.11.11 Kyle -- Using OFDM power to transmit management packets would reduce the connection capability.
// The command timer waits for data packet TX to finish before scanning, so it is OK
// to set power here.
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
} else {
RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
}
pTxBufHead->byTxPower = pDevice->byCurPwr;
//+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
if (pDevice->byFOETuning) {
if ((pPacket->p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
wCurrentRate = RATE_24M;
byPktType = PK_TYPE_11GA;
}
}
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxBufHead->wFIFOCtl = 0;
}
else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
}
else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
}
else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (IS_MULTICAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0])) ||
IS_BROADCAST_ADDRESS(&(pPacket->p80211Header->sA3.abyAddr1[0]))) {
bNeedACK = FALSE;
}
else {
bNeedACK = TRUE;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ) {
pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
//Set Preamble type always long
//pDevice->byPreambleType = PREAMBLE_LONG;
// probe-response don't retry
//if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
// bNeedACK = FALSE;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
bIsPSPOLL = TRUE;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
//Set FRAGCTL_MACHDCNT
pTxBufHead->wFragCtl |= cpu_to_le16((WORD)(cbMacHdLen << 10));
// Notes:
// Although the spec says an MMPDU can be fragmented, in most cases
// no one will send a fragmented MMPDU; it may, however, occur together with RTS.
pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
cbIVlen = 4;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
}
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
//We need to get seed here for filling TxKey entry.
//TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
// pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
}
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = TRUE;
}
//MAC header should be padded with 0 to DWORD alignment.
uPadding = 4 - (cbMacHdLen%4);
uPadding %= 4;
}
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
//Set RrvTime/RTS/CTS Buffer
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
pvRTS = NULL;
pCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS) + sizeof(STxDataHead_g);
}
else { // 802.11a/b packet
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
pvRTS = NULL;
pCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
pTxBufHead->wFragCtl |= (WORD)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
0, 0, 1, AUTO_FB_NONE, wCurrentRate);
pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
PBYTE pbyIVHead;
PBYTE pbyPayloadHead;
PBYTE pbyBSSID;
PSKeyItem pTransmitKey = NULL;
pbyIVHead = (PBYTE)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
pbyPayloadHead = (PBYTE)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
//Fill TXKEY
//Kyle: Need fix: TKIP and AES didn't encrypt the Mgmt Packet.
//s_vFillTxKey(pDevice, (PBYTE)pTxBufHead->adwTxKey, NULL);
//Fill IV(ExtIV,RSNHDR)
//s_vFillPrePayload(pDevice, pbyIVHead, NULL);
//---------------------------
// S/W or H/W Encryption
//---------------------------
//Fill MICHDR
//if (pDevice->bAES) {
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, (PBYTE)pMACHeader, (WORD)cbFrameBodySize);
//}
do {
if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
(pDevice->bLinkPass == TRUE)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == FALSE) {
// get group key
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == TRUE) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
break;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get PTK.\n");
break;
}
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == FALSE) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Get GTK.\n");
}
} while(FALSE);
//Fill TXKEY
s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
(PBYTE)pMACHeader, (WORD)cbFrameBodySize, NULL);
memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
memcpy(pbyPayloadHead, ((PBYTE)(pPacket->p80211Header) + cbMacHdLen),
cbFrameBodySize);
}
else {
// Copy the Packet into a tx Buffer
memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
}
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++ ;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
if (bIsPSPOLL) {
// The MAC will automatically replace the Duration field of the MAC header with the
// Duration field of the FIFO control header.
// This makes the AID field of a PS-POLL packet incorrect (because the PS-POLL's AID
// field sits in the same place as other packets' Duration field),
// and it will cause a Cisco AP to issue a Disassociation packet.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
} else {
((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
}
}
// first TD is the only TD
//Set TSR1 & ReqCount in TxDescHead
pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((WORD)(cbReqCount));
pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
pFrstTD->pTDInfo->byFlags = 0;
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = FALSE;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
pDevice->iTDUsed[TYPE_TXDMA0]++;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n");
}
pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
#ifdef PLICE_DEBUG
//printk("SCAN:CurrentRate is %d,TxPower is %d\n",wCurrentRate,pTxBufHead->byTxPower);
#endif
#ifdef TxInSleep
pDevice->nTxDataTimeCout=0; //2008-8-21 chester <add> for send null packet
#endif
// Poll Transmit the adapter
MACvTransmit0(pDevice->PortOffset);
return CMD_STATUS_PENDING;
}
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
BYTE byPktType;
PBYTE pbyBuffer = (PBYTE)pDevice->tx_beacon_bufs;
UINT cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
UINT cbHeaderSize = 0;
WORD wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead = (PSTxShortBufHead) pbyBuffer;
PSTxDataHead_ab pTxDataHead = (PSTxDataHead_ab) (pbyBuffer + wTxBufSize);
PS802_11Header pMACHeader;
WORD wCurrentRate;
WORD wLen = 0x0000;
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_2M;
byPktType = PK_TYPE_11B;
}
//Set Preamble type always long
pDevice->byPreambleType = PREAMBLE_LONG;
//Set FIFOCTL_GENINT
pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
//Set packet type & Get Duration
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, byPktType,
wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
}
else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
pTxDataHead->wDuration = cpu_to_le16((WORD)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, byPktType,
wCurrentRate, FALSE, 0, 0, 1, AUTO_FB_NONE));
}
BBvCaculateParameter(pDevice, cbFrameSize, wCurrentRate, byPktType,
(PWORD)&(wLen), (PBYTE)&(pTxDataHead->byServiceField), (PBYTE)&(pTxDataHead->bySignalField)
);
pTxDataHead->wTransmitLength = cpu_to_le16(wLen);
//Get TimeStampOff
pTxDataHead->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
//Generate Beacon Header
pMACHeader = (PS802_11Header)(pbyBuffer + cbHeaderSize);
memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
pMACHeader->wDurationID = 0;
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++ ;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
// Set Beacon buffer length
pDevice->wBCNBufLen = pPacket->cbMPDULen + cbHeaderSize;
MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));
MACvSetCurrBCNLength(pDevice->PortOffset, pDevice->wBCNBufLen);
// Set auto Transmit on
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
// Poll Transmit the adapter
MACvTransmitBCN(pDevice->PortOffset);
return CMD_STATUS_PENDING;
}
UINT
cbGetFragCount (
PSDevice pDevice,
PSKeyItem pTransmitKey,
UINT cbFrameBodySize,
PSEthernetHeader psEthHeader
)
{
UINT cbMACHdLen;
UINT cbFrameSize;
UINT cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
UINT cbFragPayloadSize;
UINT cbLastFragPayloadSize;
UINT cbIVlen = 0;
UINT cbICVlen = 0;
UINT cbMIClen = 0;
UINT cbFCSlen = 4;
UINT uMACfragNum = 1;
BOOL bNeedACK;
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (IS_MULTICAST_ADDRESS(&(psEthHeader->abyDstAddr[0])) ||
IS_BROADCAST_ADDRESS(&(psEthHeader->abyDstAddr[0]))) {
bNeedACK = FALSE;
}
else {
bNeedACK = TRUE;
}
}
else {
// MSDUs in Infra mode always need ACK
bNeedACK = TRUE;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
else
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
if (pDevice->bEncryptionEnable == TRUE) {
if (pTransmitKey == NULL) {
if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
(pDevice->pMgmt->eAuthenMode < WMAC_AUTH_WPA)) {
cbIVlen = 4;
cbICVlen = 4;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
}
} else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
cbIVlen = 4;
cbICVlen = 4;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
}
}
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == TRUE)) {
// Fragmentation
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
uMACfragNum = (WORD) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
} else {
uMACfragNum++;
}
}
return uMACfragNum;
}
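/*
 * Worked example for the arithmetic above (illustration only, the numbers
 * are assumed): for an open (unencrypted) frame with cbMACHdLen = 24,
 * cbFCSlen = 4 and a fragmentation threshold of 256 bytes,
 * cbFragPayloadSize = 256 - 24 - 4 = 228.  A 1000-byte MSDU that needs an
 * ACK then fills 1000 / 228 = 4 full fragments with an 88-byte remainder,
 * so cbGetFragCount() returns 5.
 */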
void
vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen) {
PSTxDesc pFrstTD;
BYTE byPktType;
PBYTE pbyTxBufferAddr;
void * pvRTS;
void * pvCTS;
void * pvTxDataHd;
UINT uDuration;
UINT cbReqCount;
PS802_11Header pMACHeader;
UINT cbHeaderSize;
UINT cbFrameBodySize;
BOOL bNeedACK;
BOOL bIsPSPOLL = FALSE;
PSTxBufHead pTxBufHead;
UINT cbFrameSize;
UINT cbIVlen = 0;
UINT cbICVlen = 0;
UINT cbMIClen = 0;
UINT cbFCSlen = 4;
UINT uPadding = 0;
UINT cbMICHDR = 0;
UINT uLength = 0;
DWORD dwMICKey0, dwMICKey1;
DWORD dwMIC_Priority;
PDWORD pdwMIC_L;
PDWORD pdwMIC_R;
WORD wTxBufSize;
UINT cbMacHdLen;
SEthernetHeader sEthHeader;
void * pvRrvTime;
void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
WORD wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
UINT uNodeIndex = 0;
BOOL bNodeExist = FALSE;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
PBYTE pbyIVHead;
PBYTE pbyPayloadHead;
PBYTE pbyMacHdr;
UINT cbExtSuppRate = 0;
// PWLAN_IE pItem;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
if(cbMPDULen <= WLAN_HDR_ADDR3_LEN) {
cbFrameBodySize = 0;
}
else {
cbFrameBodySize = cbMPDULen - WLAN_HDR_ADDR3_LEN;
}
p80211Header = (PUWLAN_80211HDR)pbMPDU;
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
pbyTxBufferAddr = (PBYTE)pFrstTD->pTDInfo->buf;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_1M;
byPktType = PK_TYPE_11B;
}
// SetPower will cause a wrong TX power state for OFDM data packets in the TX buffer.
// 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
// And the cmd timer will wait for data packet TX to finish before scanning, so it's OK
// to set power here.
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
} else {
RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
}
pTxBufHead->byTxPower = pDevice->byCurPwr;
//+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
if (pDevice->byFOETuning) {
if ((p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
wCurrentRate = RATE_24M;
byPktType = PK_TYPE_11GA;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vDMA0_tx_80211: p80211Header->sA3.wFrameCtl = %x \n", p80211Header->sA3.wFrameCtl);
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxBufHead->wFIFOCtl = 0;
}
else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
}
else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
}
else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (IS_MULTICAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0])) ||
IS_BROADCAST_ADDRESS(&(p80211Header->sA3.abyAddr1[0]))) {
bNeedACK = FALSE;
if (pDevice->bEnableHostWEP) {
uNodeIndex = 0;
bNodeExist = TRUE;
};
}
else {
if (pDevice->bEnableHostWEP) {
if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (PBYTE)(p80211Header->sA3.abyAddr1), &uNodeIndex))
bNodeExist = TRUE;
};
bNeedACK = TRUE;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
};
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ) {
pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
//Set Preamble type always long
//pDevice->byPreambleType = PREAMBLE_LONG;
// probe-response don't retry
//if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_MGMT_PROBE_RSP) {
// bNeedACK = FALSE;
// pTxBufHead->wFIFOCtl &= (~FIFOCTL_NEEDACK);
//}
}
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
bIsPSPOLL = TRUE;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
// hostapd daemon extended supported rates patch
if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0) {
cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN;
}
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0) {
cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
}
if (cbExtSuppRate >0) {
cbFrameBodySize = WLAN_ASSOCRESP_OFF_SUPP_RATES;
}
}
//Set FRAGCTL_MACHDCNT
pTxBufHead->wFragCtl |= cpu_to_le16((WORD)cbMacHdLen << 10);
// Notes:
// Although the spec says an MMPDU can be fragmented, in most cases
// no one will send a fragmented MMPDU; it may, however, occur together with RTS.
pDevice->bAES = FALSE; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
cbIVlen = 4;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
}
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
//We need to get seed here for filling TxKey entry.
//TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
// pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
}
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = TRUE;
}
//MAC header should be padded with 0 to DWORD alignment.
uPadding = 4 - (cbMacHdLen%4);
uPadding %= 4;
}
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == TRUE) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
}
else {//802.11a/b packet
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
pTxBufHead->wFragCtl |= (WORD)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
0, 0, 1, AUTO_FB_NONE, wCurrentRate);
pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
pbyMacHdr = (PBYTE)(pbyTxBufferAddr + cbHeaderSize);
pbyPayloadHead = (PBYTE)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
pbyIVHead = (PBYTE)(pbyMacHdr + cbMacHdLen + uPadding);
// Copy the Packet into a tx Buffer
memcpy(pbyMacHdr, pbMPDU, cbMacHdLen);
// version set to 0, patch for the hostapd daemon
pMACHeader->wFrameCtl &= cpu_to_le16(0xfffc);
memcpy(pbyPayloadHead, (pbMPDU + cbMacHdLen), cbFrameBodySize);
// replace supported rates, patch for the hostapd daemon (only supports 11M)
if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
if (cbExtSuppRate != 0) {
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
memcpy((pbyPayloadHead + cbFrameBodySize),
pMgmt->abyCurrSuppRates,
((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN
);
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
memcpy((pbyPayloadHead + cbFrameBodySize) + ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN,
pMgmt->abyCurrExtSuppRates,
((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN
);
}
}
// Set wep
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->bEnableHostWEP) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
dwMICKey0 = *(PDWORD)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(PDWORD)(&pTransmitKey->abyKey[20]);
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
pdwMIC_L = (PDWORD)(pbyTxBufferAddr + uLength + cbFrameBodySize);
pdwMIC_R = (PDWORD)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
if (pDevice->bTxMICFail == TRUE) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
pDevice->bTxMICFail = FALSE;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
}
s_vFillTxKey(pDevice, (PBYTE)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (WORD)cbFrameBodySize, (PBYTE)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (WORD)(cbFrameBodySize + cbMIClen));
}
}
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++ ;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
if (bIsPSPOLL) {
// The MAC will automatically replace the Duration field of the MAC header with the
// Duration field of the FIFO control header.
// This makes the AID field of a PS-POLL packet incorrect (because the PS-POLL's AID
// field sits in the same place as other packets' Duration field),
// and it will cause a Cisco AP to issue a Disassociation packet.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
} else {
((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(p80211Header->sA2.wDurationID);
}
}
// first TD is the only TD
//Set TSR1 & ReqCount in TxDescHead
pFrstTD->pTDInfo->skb = skb;
pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
pFrstTD->m_td1TD1.wReqCount = cpu_to_le16(cbReqCount);
pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
pFrstTD->pTDInfo->byFlags = 0;
pFrstTD->pTDInfo->byFlags |= TD_FLAGS_PRIV_SKB;
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = FALSE;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
pDevice->iTDUsed[TYPE_TXDMA0]++;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n");
}
pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
// Poll Transmit the adapter
MACvTransmit0(pDevice->PortOffset);
return;
}
| gpl-2.0 |
mdeejay/msm7x30-ics | drivers/staging/comedi/drivers/addi-data/addi_common.c | 761 | 57074 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
| Project : ADDI DATA | Compiler : GCC |
| Module name : addi_common.c | Version : 2.96 |
+-------------------------------+---------------------------------------+
| Author : | Date : |
+-----------------------------------------------------------------------+
| Description : ADDI COMMON Main Module |
+-----------------------------------------------------------------------+
| CONFIG OPTIONS |
| option[0] - PCI bus number - if bus number and slot number are 0, |
| then driver search for first unused card |
| option[1] - PCI slot number |
| |
| option[2] = 0 - DMA ENABLE |
| = 1 - DMA DISABLE |
+----------+-----------+------------------------------------------------+
*/
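/*
 * Example (an assumption, not taken from this file): with the user-space
 * comedi_config utility these options are typically passed as
 *     comedi_config /dev/comedi0 apci3120 1,5,0
 * i.e. PCI bus 1, slot 5, DMA enabled (option[2] == 0); leaving the first
 * two options at 0 lets the driver pick the first unused card.
 */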
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include "../../comedidev.h"
#include <asm/io.h>
#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
#include <asm/i387.h>
#endif
#include "../comedi_fc.h"
#include "addi_common.h"
#include "addi_amcc_s5933.h"
#ifndef ADDIDATA_DRIVER_NAME
#define ADDIDATA_DRIVER_NAME "addi_common"
#endif
/* Update-0.7.57->0.7.68MODULE_AUTHOR("ADDI-DATA GmbH <info@addi-data.com>"); */
/* Update-0.7.57->0.7.68MODULE_DESCRIPTION("Comedi ADDI-DATA module"); */
/* Update-0.7.57->0.7.68MODULE_LICENSE("GPL"); */
#define devpriv ((struct addi_private *)dev->private)
#define this_board ((struct addi_board *)dev->board_ptr)
#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
/* BYTE b_SaveFPUReg [94]; */
void fpu_begin(void)
{
/* asm ("fstenv b_SaveFPUReg"); */
kernel_fpu_begin();
}
void fpu_end(void)
{
/* asm ("frstor b_SaveFPUReg"); */
kernel_fpu_end();
}
#endif
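/*
 * Illustrative sketch, not part of the original driver (the function name
 * and the scaling numbers are assumptions): any in-kernel floating point
 * math, e.g. in the APCI-3200/3300 calibration paths, must be bracketed by
 * the fpu_begin()/fpu_end() helpers above so the FPU state is saved and
 * restored around the calculation.
 */
#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
static void v_ADDI_FpuUsageExample(void)
{
	int i_RawAdcCode = 2048;	/* pretend raw ADC code */
	int i_Millivolts;

	fpu_begin();
	/* floating point arithmetic is only safe inside this bracket */
	i_Millivolts = (int)((i_RawAdcCode * 10000.0) / 4096.0);
	fpu_end();
	(void)i_Millivolts;
}
#endif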
#include "addi_eeprom.c"
#if (defined (CONFIG_APCI_3120) || defined (CONFIG_APCI_3001))
#include "hwdrv_apci3120.c"
#endif
#ifdef CONFIG_APCI_1032
#include "hwdrv_apci1032.c"
#endif
#ifdef CONFIG_APCI_1516
#include "hwdrv_apci1516.c"
#endif
#ifdef CONFIG_APCI_2016
#include "hwdrv_apci2016.c"
#endif
#ifdef CONFIG_APCI_2032
#include "hwdrv_apci2032.c"
#endif
#ifdef CONFIG_APCI_2200
#include "hwdrv_apci2200.c"
#endif
#ifdef CONFIG_APCI_1564
#include "hwdrv_apci1564.c"
#endif
#ifdef CONFIG_APCI_1500
#include "hwdrv_apci1500.c"
#endif
#ifdef CONFIG_APCI_3501
#include "hwdrv_apci3501.c"
#endif
#ifdef CONFIG_APCI_035
#include "hwdrv_apci035.c"
#endif
#if (defined (CONFIG_APCI_3200) || defined (CONFIG_APCI_3300))
#include "hwdrv_apci3200.c"
#endif
#ifdef CONFIG_APCI_1710
#include "hwdrv_APCI1710.c"
#endif
#ifdef CONFIG_APCI_16XX
#include "hwdrv_apci16xx.c"
#endif
#ifdef CONFIG_APCI_3XXX
#include "hwdrv_apci3xxx.c"
#endif
#ifndef COMEDI_SUBD_TTLIO
#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
#endif
static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = {
#ifdef CONFIG_APCI_3120
{APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_1032
{APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_1516
{APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_2016
{APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_2032
{APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_2200
{APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_1564
{APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_1500
{APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_3001
{APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_3501
{APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_035
{APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_3200
{APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_3300
{APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_1710
{APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_16XX
{0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
#ifdef CONFIG_APCI_3XXX
{0x15B8, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif
{0}
};
MODULE_DEVICE_TABLE(pci, addi_apci_tbl);
static const struct addi_board boardtypes[] = {
#ifdef CONFIG_APCI_3120
{"apci3120",
APCI3120_BOARD_VENDOR_ID,
0x818D,
AMCC_OP_REG_SIZE,
APCI3120_ADDRESS_RANGE,
8,
0,
ADDIDATA_NO_EEPROM,
NULL,
16,
8,
16,
8,
0xffff,
0x3fff,
&range_apci3120_ai,
&range_apci3120_ao,
4,
4,
0x0f,
0,
NULL,
1,
1,
1,
10000,
100000,
v_APCI3120_Interrupt,
i_APCI3120_Reset,
i_APCI3120_InsnConfigAnalogInput,
i_APCI3120_InsnReadAnalogInput,
NULL,
NULL,
i_APCI3120_CommandTestAnalogInput,
i_APCI3120_CommandAnalogInput,
i_APCI3120_StopCyclicAcquisition,
NULL,
i_APCI3120_InsnWriteAnalogOutput,
NULL,
NULL,
i_APCI3120_InsnReadDigitalInput,
NULL,
i_APCI3120_InsnBitsDigitalInput,
i_APCI3120_InsnConfigDigitalOutput,
i_APCI3120_InsnWriteDigitalOutput,
i_APCI3120_InsnBitsDigitalOutput,
NULL,
i_APCI3120_InsnConfigTimer,
i_APCI3120_InsnWriteTimer,
i_APCI3120_InsnReadTimer,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_1032
{"apci1032",
APCI1032_BOARD_VENDOR_ID,
0x1003,
4,
APCI1032_ADDRESS_RANGE,
0,
0,
ADDIDATA_EEPROM,
ADDIDATA_93C76,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
32,
0,
0,
0,
NULL,
0,
0,
0,
0,
0,
v_APCI1032_Interrupt,
i_APCI1032_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI1032_ConfigDigitalInput,
i_APCI1032_Read1DigitalInput,
NULL,
i_APCI1032_ReadMoreDigitalInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_1516
{"apci1516",
APCI1516_BOARD_VENDOR_ID,
0x1001,
128,
APCI1516_ADDRESS_RANGE,
32,
0,
ADDIDATA_EEPROM,
ADDIDATA_S5920,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
8,
8,
0,
0,
NULL,
0,
1,
0,
0,
0,
NULL,
i_APCI1516_Reset,
NULL, NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI1516_Read1DigitalInput,
NULL,
i_APCI1516_ReadMoreDigitalInput,
i_APCI1516_ConfigDigitalOutput,
i_APCI1516_WriteDigitalOutput,
i_APCI1516_ReadDigitalOutput,
NULL,
i_APCI1516_ConfigWatchdog,
i_APCI1516_StartStopWriteWatchdog,
i_APCI1516_ReadWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_2016
{"apci2016",
APCI2016_BOARD_VENDOR_ID,
0x1002,
128,
APCI2016_ADDRESS_RANGE,
32,
0,
ADDIDATA_EEPROM,
ADDIDATA_S5920,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
0,
16,
0,
0,
NULL,
0,
1,
0,
0,
0,
NULL,
i_APCI2016_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI2016_ConfigDigitalOutput,
i_APCI2016_WriteDigitalOutput,
i_APCI2016_BitsDigitalOutput,
NULL,
i_APCI2016_ConfigWatchdog,
i_APCI2016_StartStopWriteWatchdog,
i_APCI2016_ReadWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_2032
{"apci2032",
APCI2032_BOARD_VENDOR_ID,
0x1004,
4,
APCI2032_ADDRESS_RANGE,
0,
0,
ADDIDATA_EEPROM,
ADDIDATA_93C76,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
0,
32,
0xffffffff,
0,
NULL,
0,
1,
0,
0,
0,
v_APCI2032_Interrupt,
i_APCI2032_Reset,
NULL, NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI2032_ConfigDigitalOutput,
i_APCI2032_WriteDigitalOutput,
i_APCI2032_ReadDigitalOutput,
i_APCI2032_ReadInterruptStatus,
i_APCI2032_ConfigWatchdog,
i_APCI2032_StartStopWriteWatchdog,
i_APCI2032_ReadWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_2200
{"apci2200",
APCI2200_BOARD_VENDOR_ID,
0x1005,
4,
APCI2200_ADDRESS_RANGE,
0,
0,
ADDIDATA_EEPROM,
ADDIDATA_93C76,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
8,
16,
0,
0,
NULL,
0,
1,
0,
0,
0,
NULL,
i_APCI2200_Reset,
NULL, NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI2200_Read1DigitalInput,
NULL,
i_APCI2200_ReadMoreDigitalInput,
i_APCI2200_ConfigDigitalOutput,
i_APCI2200_WriteDigitalOutput,
i_APCI2200_ReadDigitalOutput,
NULL,
i_APCI2200_ConfigWatchdog,
i_APCI2200_StartStopWriteWatchdog,
i_APCI2200_ReadWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_1564
{"apci1564",
APCI1564_BOARD_VENDOR_ID,
0x1006,
128,
APCI1564_ADDRESS_RANGE,
0,
0,
ADDIDATA_EEPROM,
ADDIDATA_93C76,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
32,
32,
0xffffffff,
0,
NULL,
0,
1,
0,
0,
0,
v_APCI1564_Interrupt,
i_APCI1564_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI1564_ConfigDigitalInput,
i_APCI1564_Read1DigitalInput,
NULL,
i_APCI1564_ReadMoreDigitalInput,
i_APCI1564_ConfigDigitalOutput,
i_APCI1564_WriteDigitalOutput,
i_APCI1564_ReadDigitalOutput,
i_APCI1564_ReadInterruptStatus,
i_APCI1564_ConfigTimerCounterWatchdog,
i_APCI1564_StartStopWriteTimerCounterWatchdog,
i_APCI1564_ReadTimerCounterWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_1500
{"apci1500",
APCI1500_BOARD_VENDOR_ID,
0x80fc,
128,
APCI1500_ADDRESS_RANGE,
4,
0,
ADDIDATA_NO_EEPROM,
NULL,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
16,
16,
0xffff,
0,
NULL,
0,
1,
0,
0,
0,
v_APCI1500_Interrupt,
i_APCI1500_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI1500_ConfigDigitalInputEvent,
i_APCI1500_Initialisation,
i_APCI1500_StartStopInputEvent,
i_APCI1500_ReadMoreDigitalInput,
i_APCI1500_ConfigDigitalOutputErrorInterrupt,
i_APCI1500_WriteDigitalOutput,
i_APCI1500_ConfigureInterrupt,
NULL,
i_APCI1500_ConfigCounterTimerWatchdog,
i_APCI1500_StartStopTriggerTimerCounterWatchdog,
i_APCI1500_ReadInterruptMask,
i_APCI1500_ReadCounterTimerWatchdog,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_3001
{"apci3001",
APCI3120_BOARD_VENDOR_ID,
0x828D,
AMCC_OP_REG_SIZE,
APCI3120_ADDRESS_RANGE,
8,
0,
ADDIDATA_NO_EEPROM,
NULL,
16,
8,
16,
0,
0xfff,
0,
&range_apci3120_ai,
NULL,
4,
4,
0x0f,
0,
NULL,
1,
1,
1,
10000,
100000,
v_APCI3120_Interrupt,
i_APCI3120_Reset,
i_APCI3120_InsnConfigAnalogInput,
i_APCI3120_InsnReadAnalogInput,
NULL,
NULL,
i_APCI3120_CommandTestAnalogInput,
i_APCI3120_CommandAnalogInput,
i_APCI3120_StopCyclicAcquisition,
NULL,
NULL,
NULL,
NULL,
i_APCI3120_InsnReadDigitalInput,
NULL,
i_APCI3120_InsnBitsDigitalInput,
i_APCI3120_InsnConfigDigitalOutput,
i_APCI3120_InsnWriteDigitalOutput,
i_APCI3120_InsnBitsDigitalOutput,
NULL,
i_APCI3120_InsnConfigTimer,
i_APCI3120_InsnWriteTimer,
i_APCI3120_InsnReadTimer,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_3501
{"apci3501",
APCI3501_BOARD_VENDOR_ID,
0x3001,
64,
APCI3501_ADDRESS_RANGE,
0,
0,
ADDIDATA_EEPROM,
ADDIDATA_S5933,
0,
0,
0,
8,
0,
16383,
NULL,
&range_apci3501_ao,
2,
2,
0x3,
0,
NULL,
0,
1,
0,
0,
0,
v_APCI3501_Interrupt,
i_APCI3501_Reset,
NULL, NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3501_ConfigAnalogOutput,
i_APCI3501_WriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3501_ReadDigitalInput,
i_APCI3501_ConfigDigitalOutput,
i_APCI3501_WriteDigitalOutput,
i_APCI3501_ReadDigitalOutput,
NULL,
i_APCI3501_ConfigTimerCounterWatchdog,
i_APCI3501_StartStopWriteTimerCounterWatchdog,
i_APCI3501_ReadTimerCounterWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_035
{"apci035",
APCI035_BOARD_VENDOR_ID,
0x0300,
127,
APCI035_ADDRESS_RANGE,
0,
0,
1,
ADDIDATA_S5920,
16,
8,
16,
0,
0xff,
0,
&range_apci035_ai,
NULL,
0,
0,
0,
0,
NULL,
0,
1,
0,
10000,
100000,
v_APCI035_Interrupt,
i_APCI035_Reset,
i_APCI035_ConfigAnalogInput,
i_APCI035_ReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI035_ConfigTimerWatchdog,
i_APCI035_StartStopWriteTimerWatchdog,
i_APCI035_ReadTimerWatchdog,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_3200
{"apci3200",
APCI3200_BOARD_VENDOR_ID,
0x3000,
128,
256,
4,
4,
ADDIDATA_EEPROM,
ADDIDATA_S5920,
16,
8,
16,
0,
0x3ffff,
0,
&range_apci3200_ai,
NULL,
4,
4,
0,
0,
NULL,
0,
0,
0,
10000,
100000,
v_APCI3200_Interrupt,
i_APCI3200_Reset,
i_APCI3200_ConfigAnalogInput,
i_APCI3200_ReadAnalogInput,
i_APCI3200_InsnWriteReleaseAnalogInput,
i_APCI3200_InsnBits_AnalogInput_Test,
i_APCI3200_CommandTestAnalogInput,
i_APCI3200_CommandAnalogInput,
i_APCI3200_StopCyclicAcquisition,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3200_ReadDigitalInput,
i_APCI3200_ConfigDigitalOutput,
i_APCI3200_WriteDigitalOutput,
i_APCI3200_ReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_3300
/* Begin JK .20.10.2004 = APCI-3300 integration */
{"apci3300",
APCI3200_BOARD_VENDOR_ID,
0x3007,
128,
256,
4,
4,
ADDIDATA_EEPROM,
ADDIDATA_S5920,
0,
8,
8,
0,
0x3ffff,
0,
&range_apci3300_ai,
NULL,
4,
4,
0,
0,
NULL,
0,
0,
0,
10000,
100000,
v_APCI3200_Interrupt,
i_APCI3200_Reset,
i_APCI3200_ConfigAnalogInput,
i_APCI3200_ReadAnalogInput,
i_APCI3200_InsnWriteReleaseAnalogInput,
i_APCI3200_InsnBits_AnalogInput_Test,
i_APCI3200_CommandTestAnalogInput,
i_APCI3200_CommandAnalogInput,
i_APCI3200_StopCyclicAcquisition,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3200_ReadDigitalInput,
i_APCI3200_ConfigDigitalOutput,
i_APCI3200_WriteDigitalOutput,
i_APCI3200_ReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_1710
{"apci1710", APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID,
128,
8,
256,
0,
ADDIDATA_NO_EEPROM,
NULL,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
0,
0,
0,
0,
NULL,
0,
0,
0,
0,
0,
v_APCI1710_Interrupt,
i_APCI1710_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
#endif
#ifdef CONFIG_APCI_16XX
{"apci1648",
0x15B8,
0x1009,
128,
0,
0,
0,
ADDIDATA_NO_EEPROM,
NULL,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
0,
0,
0,
48,
&range_apci16xx_ttl,
0,
0,
0,
0,
0,
NULL,
i_APCI16XX_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI16XX_InsnConfigInitTTLIO,
i_APCI16XX_InsnBitsReadTTLIO,
i_APCI16XX_InsnReadTTLIOAllPortValue,
i_APCI16XX_InsnBitsWriteTTLIO},
{"apci1696",
0x15B8,
0x100A,
128,
0,
0,
0,
ADDIDATA_NO_EEPROM,
NULL,
0,
0,
0,
0,
0,
0,
NULL,
NULL,
0,
0,
0,
96,
&range_apci16xx_ttl,
0,
0,
0,
0,
0,
NULL,
i_APCI16XX_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI16XX_InsnConfigInitTTLIO,
i_APCI16XX_InsnBitsReadTTLIO,
i_APCI16XX_InsnReadTTLIOAllPortValue,
i_APCI16XX_InsnBitsWriteTTLIO},
#endif
#ifdef CONFIG_APCI_3XXX
{"apci3000-16",
0x15B8,
0x3010,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-8",
0x15B8,
0x300F,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-4",
0x15B8,
0x300E,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
4,
2,
4,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-16",
0x15B8,
0x3013,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-8",
0x15B8,
0x3014,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-4",
0x15B8,
0x3015,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
4,
2,
4,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-16",
0x15B8,
0x3016,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-8",
0x15B8,
0x3017,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-4",
0x15B8,
0x3018,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
4,
2,
4,
0,
4095,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-16",
0x15B8,
0x3019,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-8",
0x15B8,
0x301A,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-4",
0x15B8,
0x301B,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
4,
2,
4,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-16-4",
0x15B8,
0x301C,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
4,
4095,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-8-4",
0x15B8,
0x301D,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
4,
4095,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-16-4",
0x15B8,
0x301E,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
4,
65535,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-8-4",
0x15B8,
0x301F,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
4,
65535,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
6,
10000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-16-4",
0x15B8,
0x3020,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
4,
4095,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-8-4",
0x15B8,
0x3021,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
4,
4095,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-16-4",
0x15B8,
0x3022,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
16,
8,
16,
4,
65535,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-8-4",
0x15B8,
0x3023,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
8,
4,
8,
4,
65535,
4095,
&range_apci3XXX_ai,
&range_apci3XXX_ao,
4,
4,
1,
24,
&range_apci3XXX_ttl,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
{"apci3003",
0x15B8,
0x300B,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
0,
4,
4,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
0,
NULL,
0,
0,
7,
2500,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
{"apci3002-16",
0x15B8,
0x3002,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
0,
16,
16,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
0,
NULL,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
{"apci3002-8",
0x15B8,
0x3003,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
0,
8,
8,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
0,
NULL,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
{"apci3002-4",
0x15B8,
0x3004,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
0,
4,
4,
0,
65535,
0,
&range_apci3XXX_ai,
NULL,
4,
4,
1,
0,
NULL,
0,
0,
6,
5000,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
i_APCI3XXX_InsnConfigAnalogInput,
i_APCI3XXX_InsnReadAnalogInput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnReadDigitalInput,
NULL,
i_APCI3XXX_InsnBitsDigitalInput,
NULL,
i_APCI3XXX_InsnWriteDigitalOutput,
i_APCI3XXX_InsnBitsDigitalOutput,
i_APCI3XXX_InsnReadDigitalOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL},
{"apci3500",
0x15B8,
0x3024,
256,
256,
256,
256,
ADDIDATA_NO_EEPROM,
ADDIDATA_9054,
0,
0,
0,
4,
0,
4095,
NULL,
&range_apci3XXX_ao,
0,
0,
0,
24,
&range_apci3XXX_ttl,
0,
0,
0,
0,
0,
v_APCI3XXX_Interrupt,
i_APCI3XXX_Reset,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnWriteAnalogOutput,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
i_APCI3XXX_InsnConfigInitTTLIO,
i_APCI3XXX_InsnBitsTTLIO,
i_APCI3XXX_InsnReadTTLIO,
i_APCI3XXX_InsnWriteTTLIO},
#endif
};
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct addi_board))
static struct comedi_driver driver_addi = {
.driver_name = ADDIDATA_DRIVER_NAME,
.module = THIS_MODULE,
.attach = i_ADDI_Attach,
.detach = i_ADDI_Detach,
.num_names = n_boardtypes,
.board_name = &boardtypes[0].pc_DriverName,
.offset = sizeof(struct addi_board),
};
COMEDI_PCI_INITCLEANUP(driver_addi, addi_apci_tbl);
/*
+----------------------------------------------------------------------------+
| Function name :static int i_ADDI_Attach(struct comedi_device *dev, |
| struct comedi_devconfig *it) |
| |
+----------------------------------------------------------------------------+
| Task :Detects the card. |
| Configure the driver for a particular board. |
| This function does all the initializations and memory |
| allocation of data structures for the driver. |
+----------------------------------------------------------------------------+
| Input Parameters :struct comedi_device *dev |
| struct comedi_devconfig *it |
| |
+----------------------------------------------------------------------------+
| Return Value : 0 |
| |
+----------------------------------------------------------------------------+
*/
static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret, pages, i, n_subdevices;
unsigned int dw_Dummy;
resource_size_t io_addr[5];
unsigned int irq;
resource_size_t iobase_a, iobase_main, iobase_addon, iobase_reserved;
struct pcilst_struct *card = NULL;
unsigned char pci_bus, pci_slot, pci_func;
int i_Dma = 0;
ret = alloc_private(dev, sizeof(struct addi_private));
if (ret < 0)
return -ENOMEM;
if (!pci_list_builded) {
v_pci_card_list_init(this_board->i_VendorId, 1); /* 1 for displaying the list.. */
pci_list_builded = 1;
}
/* printk("comedi%d: "ADDIDATA_DRIVER_NAME": board=%s",dev->minor,this_board->pc_DriverName); */
if ((this_board->i_Dma) && (it->options[2] == 0)) {
i_Dma = 1;
}
card = ptr_select_and_alloc_pci_card(this_board->i_VendorId,
this_board->i_DeviceId,
it->options[0],
it->options[1], i_Dma);
if (card == NULL)
return -EIO;
devpriv->allocated = 1;
if ((i_pci_card_data(card, &pci_bus, &pci_slot, &pci_func, &io_addr[0],
&irq)) < 0) {
i_pci_card_free(card);
printk(" - Can't get AMCC data!\n");
return -EIO;
}
iobase_a = io_addr[0];
iobase_main = io_addr[1];
iobase_addon = io_addr[2];
iobase_reserved = io_addr[3];
printk("\nBus %d: Slot %d: Funct%d\nBase0: 0x%8llx\nBase1: 0x%8llx\nBase2: 0x%8llx\nBase3: 0x%8llx\n", pci_bus, pci_slot, pci_func, (unsigned long long)io_addr[0], (unsigned long long)io_addr[1], (unsigned long long)io_addr[2], (unsigned long long)io_addr[3]);
if ((this_board->pc_EepromChip == NULL)
|| (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) {
/************************************/
/* Test if more than 1 address used */
/************************************/
if (this_board->i_IorangeBase1 != 0) {
dev->iobase = (unsigned long)iobase_main; /* DAQ base address... */
} else {
dev->iobase = (unsigned long)iobase_a; /* DAQ base address... */
}
dev->board_name = this_board->pc_DriverName;
devpriv->amcc = card;
devpriv->iobase = (int) dev->iobase;
devpriv->i_IobaseAmcc = (int) iobase_a; /* AMCC base address... */
devpriv->i_IobaseAddon = (int) iobase_addon; /* ADD ON base address.... */
devpriv->i_IobaseReserved = (int) iobase_reserved;
devpriv->ps_BoardInfo = this_board;
} else {
dev->board_name = this_board->pc_DriverName;
dev->iobase = (unsigned long)io_addr[2];
devpriv->amcc = card;
devpriv->iobase = (int) io_addr[2];
devpriv->ps_BoardInfo = this_board;
devpriv->i_IobaseReserved = (int) io_addr[3];
printk("\nioremap begin");
devpriv->dw_AiBase = ioremap(io_addr[3],
this_board->i_IorangeBase3);
printk("\nioremap end");
}
/* ## */
if (irq > 0) {
if (request_irq(irq, v_ADDI_Interrupt, IRQF_SHARED,
this_board->pc_DriverName, dev) < 0) {
printk(", unable to allocate IRQ %u, DISABLING IT",
irq);
irq = 0; /* Can't use IRQ */
} else {
printk("\nirq=%u", irq);
}
} else {
printk(", IRQ disabled");
}
printk("\nOption %d %d %d\n", it->options[0], it->options[1],
it->options[2]);
dev->irq = irq;
/* Read EEPROM and fill the addi_board structure */
if (this_board->i_PCIEeprom) {
printk("\nPCI Eeprom used");
if (!(strcmp(this_board->pc_EepromChip, "S5920"))) {
/* Set 3 wait states */
if (!(strcmp(this_board->pc_DriverName, "apci035"))) {
outl(0x80808082, devpriv->i_IobaseAmcc + 0x60);
} else {
outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
}
/* Enable the interrupt for the controller */
dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
printk("\nEnable the interrupt for the controler");
}
printk("\nRead Eeprom");
i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip,
dev);
} else {
printk("\nPCI Eeprom unused");
}
if (it->options[2] > 0) {
devpriv->us_UseDma = ADDI_DISABLE;
} else {
devpriv->us_UseDma = ADDI_ENABLE;
}
if (this_board->i_Dma) {
printk("\nDMA used");
if (devpriv->us_UseDma == ADDI_ENABLE) {
/* alloc DMA buffers */
devpriv->b_DmaDoubleBuffer = 0;
for (i = 0; i < 2; i++) {
for (pages = 4; pages >= 0; pages--) {
devpriv->ul_DmaBufferVirtual[i] =
(void *) __get_free_pages(GFP_KERNEL, pages);
if (devpriv->ul_DmaBufferVirtual[i])
break;
}
if (devpriv->ul_DmaBufferVirtual[i]) {
devpriv->ui_DmaBufferPages[i] = pages;
devpriv->ui_DmaBufferSize[i] =
PAGE_SIZE * pages;
devpriv->ui_DmaBufferSamples[i] =
devpriv->
ui_DmaBufferSize[i] >> 1;
devpriv->ul_DmaBufferHw[i] =
virt_to_bus((void *)devpriv->
ul_DmaBufferVirtual[i]);
}
}
if (!devpriv->ul_DmaBufferVirtual[0]) {
printk
(", Can't allocate DMA buffer, DMA disabled!");
devpriv->us_UseDma = ADDI_DISABLE;
}
if (devpriv->ul_DmaBufferVirtual[1]) {
devpriv->b_DmaDoubleBuffer = 1;
}
}
if ((devpriv->us_UseDma == ADDI_ENABLE)) {
printk("\nDMA ENABLED\n");
} else {
printk("\nDMA DISABLED\n");
}
}
if (!strcmp(this_board->pc_DriverName, "apci1710")) {
#ifdef CONFIG_APCI_1710
i_ADDI_AttachPCI1710(dev);
/* save base address */
devpriv->s_BoardInfos.ui_Address = io_addr[2];
#endif
} else {
/* Update 0.7.57->0.7.68: dev->n_subdevices = 7; */
n_subdevices = 7;
ret = alloc_subdevices(dev, n_subdevices);
if (ret < 0)
return ret;
/* Allocate and Initialise AI Subdevice Structures */
s = dev->subdevices + 0;
if ((this_board->i_NbrAiChannel)
|| (this_board->i_NbrAiChannelDiff)) {
dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags =
SDF_READABLE | SDF_COMMON | SDF_GROUND
| SDF_DIFF;
if (this_board->i_NbrAiChannel) {
s->n_chan = this_board->i_NbrAiChannel;
devpriv->b_SingelDiff = 0;
} else {
s->n_chan = this_board->i_NbrAiChannelDiff;
devpriv->b_SingelDiff = 1;
}
s->maxdata = this_board->i_AiMaxdata;
s->len_chanlist = this_board->i_AiChannelList;
s->range_table = this_board->pr_AiRangelist;
/* Set the initialisation flag */
devpriv->b_AiInitialisation = 1;
s->insn_config =
this_board->i_hwdrv_InsnConfigAnalogInput;
s->insn_read = this_board->i_hwdrv_InsnReadAnalogInput;
s->insn_write =
this_board->i_hwdrv_InsnWriteAnalogInput;
s->insn_bits = this_board->i_hwdrv_InsnBitsAnalogInput;
s->do_cmdtest =
this_board->i_hwdrv_CommandTestAnalogInput;
s->do_cmd = this_board->i_hwdrv_CommandAnalogInput;
s->cancel = this_board->i_hwdrv_CancelAnalogInput;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Allocate and Initialise AO Subdevice Structures */
s = dev->subdevices + 1;
if (this_board->i_NbrAoChannel) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->i_NbrAoChannel;
s->maxdata = this_board->i_AoMaxdata;
s->len_chanlist = this_board->i_NbrAoChannel;
s->range_table = this_board->pr_AoRangelist;
s->insn_config =
this_board->i_hwdrv_InsnConfigAnalogOutput;
s->insn_write =
this_board->i_hwdrv_InsnWriteAnalogOutput;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Allocate and Initialise DI Subdevice Structures */
s = dev->subdevices + 2;
if (this_board->i_NbrDiChannel) {
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->i_NbrDiChannel;
s->maxdata = 1;
s->len_chanlist = this_board->i_NbrDiChannel;
s->range_table = &range_digital;
s->io_bits = 0; /* all bits input */
s->insn_config =
this_board->i_hwdrv_InsnConfigDigitalInput;
s->insn_read = this_board->i_hwdrv_InsnReadDigitalInput;
s->insn_write =
this_board->i_hwdrv_InsnWriteDigitalInput;
s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalInput;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Allocate and Initialise DO Subdevice Structures */
s = dev->subdevices + 3;
if (this_board->i_NbrDoChannel) {
s->type = COMEDI_SUBD_DO;
s->subdev_flags =
SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->i_NbrDoChannel;
s->maxdata = this_board->i_DoMaxdata;
s->len_chanlist = this_board->i_NbrDoChannel;
s->range_table = &range_digital;
s->io_bits = 0xf; /* all bits output */
s->insn_config = this_board->i_hwdrv_InsnConfigDigitalOutput; /* for digital output memory.. */
s->insn_write =
this_board->i_hwdrv_InsnWriteDigitalOutput;
s->insn_bits =
this_board->i_hwdrv_InsnBitsDigitalOutput;
s->insn_read =
this_board->i_hwdrv_InsnReadDigitalOutput;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Allocate and Initialise Timer Subdevice Structures */
s = dev->subdevices + 4;
if (this_board->i_Timer) {
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 1;
s->maxdata = 0;
s->len_chanlist = 1;
s->range_table = &range_digital;
s->insn_write = this_board->i_hwdrv_InsnWriteTimer;
s->insn_read = this_board->i_hwdrv_InsnReadTimer;
s->insn_config = this_board->i_hwdrv_InsnConfigTimer;
s->insn_bits = this_board->i_hwdrv_InsnBitsTimer;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Allocate and Initialise TTL */
s = dev->subdevices + 5;
if (this_board->i_NbrTTLChannel) {
s->type = COMEDI_SUBD_TTLIO;
s->subdev_flags =
SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->i_NbrTTLChannel;
s->maxdata = 1;
s->io_bits = 0; /* all bits input */
s->len_chanlist = this_board->i_NbrTTLChannel;
s->range_table = &range_digital;
s->insn_config = this_board->i_hwdr_ConfigInitTTLIO;
s->insn_bits = this_board->i_hwdr_ReadTTLIOBits;
s->insn_read = this_board->i_hwdr_ReadTTLIOAllPortValue;
s->insn_write = this_board->i_hwdr_WriteTTLIOChlOnOff;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* EEPROM */
s = dev->subdevices + 6;
if (this_board->i_PCIEeprom) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 256;
s->maxdata = 0xffff;
s->insn_read = i_ADDIDATA_InsnReadEeprom;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
}
printk("\ni_ADDI_Attach end\n");
i_ADDI_Reset(dev);
devpriv->b_ValidDriver = 1;
return 0;
}
/*
+----------------------------------------------------------------------------+
| Function name : static int i_ADDI_Detach(struct comedi_device *dev) |
| |
| |
+----------------------------------------------------------------------------+
| Task : Deallocates resources of the addi_common driver |
| Free the DMA buffers, unregister irq. |
| |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev |
| |
| |
+----------------------------------------------------------------------------+
| Return Value : 0 |
| |
+----------------------------------------------------------------------------+
*/
static int i_ADDI_Detach(struct comedi_device *dev)
{
if (dev->private) {
if (devpriv->b_ValidDriver) {
i_ADDI_Reset(dev);
}
if (dev->irq) {
free_irq(dev->irq, dev);
}
if ((devpriv->ps_BoardInfo->pc_EepromChip == NULL)
|| (strcmp(devpriv->ps_BoardInfo->pc_EepromChip,
ADDIDATA_9054) != 0)) {
if (devpriv->allocated) {
i_pci_card_free(devpriv->amcc);
}
if (devpriv->ul_DmaBufferVirtual[0]) {
free_pages((unsigned long)devpriv->
ul_DmaBufferVirtual[0],
devpriv->ui_DmaBufferPages[0]);
}
if (devpriv->ul_DmaBufferVirtual[1]) {
free_pages((unsigned long)devpriv->
ul_DmaBufferVirtual[1],
devpriv->ui_DmaBufferPages[1]);
}
} else {
iounmap(devpriv->dw_AiBase);
if (devpriv->allocated) {
i_pci_card_free(devpriv->amcc);
}
}
if (pci_list_builded) {
/* v_pci_card_list_cleanup(PCI_VENDOR_ID_AMCC); */
v_pci_card_list_cleanup(this_board->i_VendorId);
pci_list_builded = 0;
}
}
return 0;
}
/*
+----------------------------------------------------------------------------+
| Function name : static int i_ADDI_Reset(struct comedi_device *dev) |
| |
+----------------------------------------------------------------------------+
| Task : Disables all interrupts, Resets digital output to low, |
| Set all analog output to low |
| |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev |
| |
| |
+----------------------------------------------------------------------------+
| Return Value : 0 |
| |
+----------------------------------------------------------------------------+
*/
static int i_ADDI_Reset(struct comedi_device *dev)
{
this_board->i_hwdrv_Reset(dev);
return 0;
}
/* Interrupt function */
/*
+----------------------------------------------------------------------------+
| Function name : |
|static void v_ADDI_Interrupt(int irq, void *d) |
| |
+----------------------------------------------------------------------------+
| Task : Registered interrupt routine |
| |
+----------------------------------------------------------------------------+
| Input Parameters : int irq |
| |
| |
+----------------------------------------------------------------------------+
| Return Value : |
| |
+----------------------------------------------------------------------------+
*/
static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
this_board->v_hwdrv_Interrupt(irq, d);
return IRQ_RETVAL(1);
}
/* EEPROM Read Function */
/*
+----------------------------------------------------------------------------+
| Function name : |
|INT i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data)
| |
+----------------------------------------------------------------------------+
| Task : Read 256 words from EEPROM |
| |
+----------------------------------------------------------------------------+
| Input Parameters :(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data) |
| |
| |
+----------------------------------------------------------------------------+
| Return Value : |
| |
+----------------------------------------------------------------------------+
*/
static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned short w_Data;
unsigned short w_Address;
w_Address = CR_CHAN(insn->chanspec); /* address to be read as 0,1,2,3...255 */
w_Data = w_EepromReadWord(devpriv->i_IobaseAmcc,
this_board->pc_EepromChip, 0x100 + (2 * w_Address));
data[0] = w_Data;
/* multiplied by 2 because the input channel index runs 0,1,2...255 */
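/*
 * Worked example (illustrative only, not from the original sources): the
 * EEPROM data is word addressed behind a 0x100 byte offset, so reading
 * comedi channel 5 ends up calling
 *	w_EepromReadWord(..., 0x100 + (2 * 5))	== offset 0x10A
 * i.e. consecutive channels map to consecutive 16-bit words.
 */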
return insn->n;
}
| gpl-2.0 |
lukego/linux | drivers/gpio/gpio-loongson.c | 761 | 2612 | /*
* Loongson-2F/3A/3B GPIO Support
*
* Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
* Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
* Copyright (c) 2013 Hongbing Hu <huhb@lemote.com>
* Copyright (c) 2014 Huacai Chen <chenhc@lemote.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <asm/types.h>
#include <loongson.h>
#include <linux/gpio.h>
#define STLS2F_N_GPIO 4
#define STLS3A_N_GPIO 16
#ifdef CONFIG_CPU_LOONGSON3
#define LOONGSON_N_GPIO STLS3A_N_GPIO
#else
#define LOONGSON_N_GPIO STLS2F_N_GPIO
#endif
#define LOONGSON_GPIO_IN_OFFSET 16
static DEFINE_SPINLOCK(gpio_lock);
static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
u32 temp;
u32 mask;
spin_lock(&gpio_lock);
mask = 1 << gpio;
temp = LOONGSON_GPIOIE;
temp |= mask;
LOONGSON_GPIOIE = temp;
spin_unlock(&gpio_lock);
return 0;
}
static int loongson_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int level)
{
u32 temp;
u32 mask;
gpio_set_value(gpio, level);
spin_lock(&gpio_lock);
mask = 1 << gpio;
temp = LOONGSON_GPIOIE;
temp &= (~mask);
LOONGSON_GPIOIE = temp;
spin_unlock(&gpio_lock);
return 0;
}
static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
u32 val;
u32 mask;
mask = 1 << (gpio + LOONGSON_GPIO_IN_OFFSET);
spin_lock(&gpio_lock);
val = LOONGSON_GPIODATA;
spin_unlock(&gpio_lock);
return (val & mask) != 0;
}
static void loongson_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
u32 val;
u32 mask;
mask = 1 << gpio;
spin_lock(&gpio_lock);
val = LOONGSON_GPIODATA;
if (value)
val |= mask;
else
val &= (~mask);
LOONGSON_GPIODATA = val;
spin_unlock(&gpio_lock);
}
static struct gpio_chip loongson_chip = {
.label = "Loongson-gpio-chip",
.direction_input = loongson_gpio_direction_input,
.get = loongson_gpio_get_value,
.direction_output = loongson_gpio_direction_output,
.set = loongson_gpio_set_value,
.base = 0,
.ngpio = LOONGSON_N_GPIO,
.can_sleep = false,
};
static int __init loongson_gpio_setup(void)
{
return gpiochip_add(&loongson_chip);
}
postcore_initcall(loongson_gpio_setup);
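/*
 * Illustrative sketch (not part of the original driver): once the chip is
 * registered at base 0, a consumer could drive line 0 through the legacy
 * gpiolib calls. Kept inside #if 0 so it is never built; the label string
 * and the choice of line 0 are only assumptions for the example.
 */
#if 0
static int loongson_gpio_demo(void)
{
	int err = gpio_request(0, "loongson-demo");	/* claim GPIO 0 */

	if (err)
		return err;
	gpio_direction_output(0, 1);	/* drive the pin high */
	gpio_set_value(0, 0);		/* then pull it low again */
	gpio_free(0);			/* release the line */
	return 0;
}
#endif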
| gpl-2.0 |
LorDClockaN/LorDmodSaga | net/mac80211/rc80211_minstrel.c | 761 | 15958 | /*
* Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on minstrel.c:
* Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz>
* Sponsored by Indranet Technologies Ltd
*
* Based on sample.c:
* Copyright (c) 2005 John Bicket
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"
#define SAMPLE_COLUMNS 10
#define SAMPLE_TBL(_mi, _idx, _col) \
_mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]
/* convert mac80211 rate index to local array index */
static inline int
rix_to_ndx(struct minstrel_sta_info *mi, int rix)
{
int i = rix;
for (i = rix; i >= 0; i--)
if (mi->r[i].rix == rix)
break;
WARN_ON(i < 0);
return i;
}
static void
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0;
u32 max_prob = 0, index_max_prob = 0;
u32 usecs;
u32 p;
int i;
mi->stats_update = jiffies;
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
usecs = mr->perfect_tx_time;
if (!usecs)
usecs = 1000000;
/* To avoid rounding issues, probabilities scale from 0 (0%)
* to 18000 (100%) */
if (mr->attempts) {
p = (mr->success * 18000) / mr->attempts;
mr->succ_hist += mr->success;
mr->att_hist += mr->attempts;
mr->cur_prob = p;
p = ((p * (100 - mp->ewma_level)) + (mr->probability *
mp->ewma_level)) / 100;
mr->probability = p;
mr->cur_tp = p * (1000000 / usecs);
}
mr->last_success = mr->success;
mr->last_attempts = mr->attempts;
mr->success = 0;
mr->attempts = 0;
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
if ((mr->probability > 17100) || (mr->probability < 1800)) {
mr->adjusted_retry_count = mr->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
mr->sample_limit = 4;
} else {
mr->sample_limit = -1;
mr->adjusted_retry_count = mr->retry_count;
}
if (!mr->adjusted_retry_count)
mr->adjusted_retry_count = 2;
}
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
if (max_tp < mr->cur_tp) {
index_max_tp = i;
max_tp = mr->cur_tp;
}
if (max_prob < mr->probability) {
index_max_prob = i;
max_prob = mr->probability;
}
}
max_tp = 0;
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
if (i == index_max_tp)
continue;
if (max_tp < mr->cur_tp) {
index_max_tp2 = i;
max_tp = mr->cur_tp;
}
}
mi->max_tp_rate = index_max_tp;
mi->max_tp_rate2 = index_max_tp2;
mi->max_prob_rate = index_max_prob;
}
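/*
 * Worked example (illustrative only, not from the original sources): with
 * the default ewma_level of 75, a rate that just went 9 for 10 while its
 * running estimate was at 18000 (100%) is updated above as
 *	cur_prob    = (9 * 18000) / 10                = 16200  (90%)
 *	probability = (16200 * 25 + 18000 * 75) / 100 = 17550  (~97.5%)
 * so a single bad interval only nudges the smoothed estimate, which is
 * what the EWMA in minstrel_update_stats() is meant to achieve.
 */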
static void
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
struct minstrel_sta_info *mi = priv_sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *ar = info->status.rates;
int i, ndx;
int success;
success = !!(info->flags & IEEE80211_TX_STAT_ACK);
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (ar[i].idx < 0)
break;
ndx = rix_to_ndx(mi, ar[i].idx);
if (ndx < 0)
continue;
mi->r[ndx].attempts += ar[i].count;
if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
mi->r[ndx].success += success;
}
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
mi->sample_count++;
if (mi->sample_deferred > 0)
mi->sample_deferred--;
}
static inline unsigned int
minstrel_get_retry_count(struct minstrel_rate *mr,
struct ieee80211_tx_info *info)
{
unsigned int retry = mr->adjusted_retry_count;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
retry = max(2U, min(mr->retry_count_rtscts, retry));
else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
retry = max(2U, min(mr->retry_count_cts, retry));
return retry;
}
static int
minstrel_get_next_sample(struct minstrel_sta_info *mi)
{
unsigned int sample_ndx;
sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
mi->sample_idx++;
if ((int) mi->sample_idx > (mi->n_rates - 2)) {
mi->sample_idx = 0;
mi->sample_column++;
if (mi->sample_column >= SAMPLE_COLUMNS)
mi->sample_column = 0;
}
return sample_ndx;
}
static void
minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
struct ieee80211_tx_rate *ar = info->control.rates;
unsigned int ndx, sample_ndx = 0;
bool mrr;
bool sample_slower = false;
bool sample = false;
int i, delta;
int mrr_ndx[3];
int sample_rate;
if (rate_control_send_low(sta, priv_sta, txrc))
return;
mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
if (time_after(jiffies, mi->stats_update + (mp->update_interval *
HZ) / 1000))
minstrel_update_stats(mp, mi);
ndx = mi->max_tp_rate;
if (mrr)
sample_rate = mp->lookaround_rate_mrr;
else
sample_rate = mp->lookaround_rate;
mi->packet_count++;
delta = (mi->packet_count * sample_rate / 100) -
(mi->sample_count + mi->sample_deferred / 2);
/* delta > 0: sampling required */
if ((delta > 0) && (mrr || !mi->prev_sample)) {
struct minstrel_rate *msr;
if (mi->packet_count >= 10000) {
mi->sample_deferred = 0;
mi->sample_count = 0;
mi->packet_count = 0;
} else if (delta > mi->n_rates * 2) {
/* With multi-rate retry, not every planned sample
* attempt actually gets used, due to the way the retry
* chain is set up - [max_tp,sample,prob,lowest] for
* sample_rate < max_tp.
*
* If there's too much sampling backlog and the link
* starts getting worse, minstrel would start bursting
* out lots of sampling frames, which would result
* in a large throughput loss. */
mi->sample_count += (delta - mi->n_rates * 2);
}
sample_ndx = minstrel_get_next_sample(mi);
msr = &mi->r[sample_ndx];
sample = true;
sample_slower = mrr && (msr->perfect_tx_time >
mi->r[ndx].perfect_tx_time);
if (!sample_slower) {
if (msr->sample_limit != 0) {
ndx = sample_ndx;
mi->sample_count++;
if (msr->sample_limit > 0)
msr->sample_limit--;
} else {
sample = false;
}
} else {
/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
* packets that have the sampling rate deferred to the
* second MRR stage. Increase the sample counter only
* if the deferred sample rate was actually used.
* Use the sample_deferred counter to make sure that
* the sampling is not done in large bursts */
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
mi->sample_deferred++;
}
}
mi->prev_sample = sample;
/* If we're not using MRR and the sampling rate already
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr && sample && (mi->r[ndx].probability > 17100))
ndx = mi->max_tp_rate;
ar[0].idx = mi->r[ndx].rix;
ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info);
if (!mrr) {
if (!sample)
ar[0].count = mp->max_retry;
ar[1].idx = mi->lowest_rix;
ar[1].count = mp->max_retry;
return;
}
/* MRR setup */
if (sample) {
if (sample_slower)
mrr_ndx[0] = sample_ndx;
else
mrr_ndx[0] = mi->max_tp_rate;
} else {
mrr_ndx[0] = mi->max_tp_rate2;
}
mrr_ndx[1] = mi->max_prob_rate;
mrr_ndx[2] = 0;
for (i = 1; i < 4; i++) {
ar[i].idx = mi->r[mrr_ndx[i - 1]].rix;
ar[i].count = mi->r[mrr_ndx[i - 1]].adjusted_retry_count;
}
}
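/*
 * Worked example (illustrative only, not from the original sources): with
 * the MRR lookaround rate of 10%, after 100 data packets of which 8 were
 * already sample attempts (and none deferred), the code above computes
 *	delta = (100 * 10) / 100 - (8 + 0 / 2) = 2
 * so two sampling transmissions are still owed and the next packet is a
 * candidate for rate sampling.
 */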
static void
calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
struct minstrel_rate *d, struct ieee80211_rate *rate)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
d->perfect_tx_time = ieee80211_frame_duration(local, 1200,
rate->bitrate, erp, 1);
d->ack_time = ieee80211_frame_duration(local, 10,
rate->bitrate, erp, 1);
}
static void
init_sample_table(struct minstrel_sta_info *mi)
{
unsigned int i, col, new_idx;
unsigned int n_srates = mi->n_rates - 1;
u8 rnd[8];
mi->sample_column = 0;
mi->sample_idx = 0;
memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates);
for (col = 0; col < SAMPLE_COLUMNS; col++) {
for (i = 0; i < n_srates; i++) {
get_random_bytes(rnd, sizeof(rnd));
new_idx = (i + rnd[i & 7]) % n_srates;
while (SAMPLE_TBL(mi, new_idx, col) != 0)
new_idx = (new_idx + 1) % n_srates;
/* Don't sample the slowest rate (i.e. slowest base
* rate). We must presume that the slowest rate works
* fine, or else other management frames will also be
* failing and the link will break */
SAMPLE_TBL(mi, new_idx, col) = i + 1;
}
}
}
static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
struct ieee80211_local *local = hw_to_local(mp->hw);
struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
mi->lowest_rix = rate_lowest_index(sband, sta);
ctl_rate = &sband->bitrates[mi->lowest_rix];
mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
for (i = 0; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[n];
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
if (!rate_supported(sta, sband->band, i))
continue;
n++;
memset(mr, 0, sizeof(*mr));
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
calc_rate_durations(mi, local, mr,
&sband->bitrates[i]);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
mr->sample_limit = -1;
mr->retry_count = 1;
mr->retry_count_cts = 1;
mr->retry_count_rtscts = 1;
tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
do {
/* add one retransmission */
tx_time_single = mr->ack_time + mr->perfect_tx_time;
/* contention window */
tx_time_single += t_slot + min(cw, mp->cw_max);
cw = (cw << 1) | 1;
tx_time += tx_time_single;
tx_time_cts += tx_time_single + mi->sp_ack_dur;
tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur;
if ((tx_time_cts < mp->segment_size) &&
(mr->retry_count_cts < mp->max_retry))
mr->retry_count_cts++;
if ((tx_time_rtscts < mp->segment_size) &&
(mr->retry_count_rtscts < mp->max_retry))
mr->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
(++mr->retry_count < mp->max_retry));
mr->adjusted_retry_count = mr->retry_count;
}
for (i = n; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[i];
mr->rix = -1;
}
mi->n_rates = n;
mi->stats_update = jiffies;
init_sample_table(mi);
}
static void *
minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct ieee80211_supported_band *sband;
struct minstrel_sta_info *mi;
struct minstrel_priv *mp = priv;
struct ieee80211_hw *hw = mp->hw;
int max_rates = 0;
int i;
mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
if (!mi)
return NULL;
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
sband = hw->wiphy->bands[i];
if (sband && sband->n_bitrates > max_rates)
max_rates = sband->n_bitrates;
}
mi->r = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
if (!mi->r)
goto error;
mi->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
if (!mi->sample_table)
goto error1;
mi->stats_update = jiffies;
return mi;
error1:
kfree(mi->r);
error:
kfree(mi);
return NULL;
}
static void
minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
kfree(mi->sample_table);
kfree(mi->r);
kfree(mi);
}
static void *
minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
struct minstrel_priv *mp;
mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
if (!mp)
return NULL;
/* contention window settings
* Just an approximation. Using the per-queue values would complicate
* the calculations and is probably unnecessary */
mp->cw_min = 15;
mp->cw_max = 1023;
/* number of packets (in %) to use for sampling other rates
* sample less often for non-mrr packets, because the overhead
* is much higher than with mrr */
mp->lookaround_rate = 5;
mp->lookaround_rate_mrr = 10;
/* moving average weight for EWMA */
mp->ewma_level = 75;
/* maximum time that the hw is allowed to stay in one MRR segment */
mp->segment_size = 6000;
if (hw->max_rate_tries > 0)
mp->max_retry = hw->max_rate_tries;
else
/* safe default, does not necessarily have to match hw properties */
mp->max_retry = 7;
if (hw->max_rates >= 4)
mp->has_mrr = true;
mp->hw = hw;
mp->update_interval = 100;
return mp;
}
static void
minstrel_free(void *priv)
{
kfree(priv);
}
struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
.tx_status = minstrel_tx_status,
.get_rate = minstrel_get_rate,
.rate_init = minstrel_rate_init,
.alloc = minstrel_alloc,
.free = minstrel_free,
.alloc_sta = minstrel_alloc_sta,
.free_sta = minstrel_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = minstrel_add_sta_debugfs,
.remove_sta_debugfs = minstrel_remove_sta_debugfs,
#endif
};
int __init
rc80211_minstrel_init(void)
{
return ieee80211_rate_control_register(&mac80211_minstrel);
}
void
rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel);
}
| gpl-2.0 |
NX511J-dev/kernel_zte_nx511j | drivers/staging/vt6655/device_main.c | 1273 | 102381 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: device_main.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
*
* Author: Lyndon Chen
*
* Date: Jan 8, 2003
*
* Functions:
*
* vt6655_probe - module initial (insmod) driver entry
* vt6655_remove - module remove entry
* vt6655_init_info - device structure resource allocation function
* device_free_info - device structure resource free function
* device_get_pci_info - get allocated pci io/mem resource
* device_print_info - print out resource
* device_open - allocate dma/descriptor resource & initialize mac/bbp function
* device_xmit - asynchronous data tx function
* device_intr - interrupt handle function
* device_set_multi - set mac filter
* device_ioctl - ioctl entry
* device_close - shutdown mac/bbp & free dma/descriptor resource
* device_rx_srv - rx service function
* device_receive_frame - rx data function
* device_alloc_rx_buf - rx buffer pre-allocated function
* device_alloc_frag_buf - rx fragment pre-allocated function
* device_free_tx_buf - free tx buffer function
* device_free_frag_buf- free de-fragment buffer
* device_dma0_tx_80211- tx 802.11 frame via dma0
* device_dma0_xmit- tx PS buffered frame via dma0
* device_init_rd0_ring- initialize rd dma0 ring
* device_init_rd1_ring- initialize rd dma1 ring
* device_init_td0_ring- initialize tx dma0 ring buffer
* device_init_td1_ring- initialize tx dma1 ring buffer
* device_init_registers- initialize MAC & BBP & RF internal registers.
* device_init_rings- initialize tx/rx ring buffer
* device_init_defrag_cb- initialize & allocate de-fragment buffer.
* device_free_rings- free all allocated ring buffer
* device_tx_srv- tx interrupt service function
*
* Revision History:
*/
#undef __NO_VERSION__
#include <linux/file.h>
#include "device.h"
#include "card.h"
#include "channel.h"
#include "baseband.h"
#include "mac.h"
#include "tether.h"
#include "wmgr.h"
#include "wctl.h"
#include "power.h"
#include "wcmd.h"
#include "iocmd.h"
#include "tcrc.h"
#include "rxtx.h"
#include "wroute.h"
#include "bssdb.h"
#include "hostap.h"
#include "wpactl.h"
#include "ioctl.h"
#include "iwctl.h"
#include "dpc.h"
#include "datarate.h"
#include "rf.h"
#include "iowpa.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
/*--------------------- Static Definitions -------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel = MSG_LEVEL_INFO;
//
// Define module options
//
MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
static int mlme_kill;
//static struct task_struct * mlme_task;
#define DEVICE_PARAM(N, D)
/*
static const int N[MAX_UINTS]=OPTION_DEFAULT;\
MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UINTS) "i");\
MODULE_PARM_DESC(N, D);
*/
#define RX_DESC_MIN0 16
#define RX_DESC_MAX0 128
#define RX_DESC_DEF0 32
DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");
#define RX_DESC_MIN1 16
#define RX_DESC_MAX1 128
#define RX_DESC_DEF1 32
DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");
#define TX_DESC_MIN0 16
#define TX_DESC_MAX0 128
#define TX_DESC_DEF0 32
DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
#define TX_DESC_MIN1 16
#define TX_DESC_MAX1 128
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
#define IP_ALIG_DEF 0
/* IP_byte_align[] controls dword (unsigned long) alignment of the IP header
0: the IP header will not be dword aligned (default).
1: the IP header will be dword aligned.
In some environments the IP header must be dword aligned,
or the packet will be dropped when we receive it. (e.g. IPVS)
*/
DEVICE_PARAM(IP_byte_align, "Enable IP header dword aligned");
#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64
DEVICE_PARAM(int_works, "Number of packets per interrupt services");
#define CHANNEL_MIN 1
#define CHANNEL_MAX 14
#define CHANNEL_DEF 6
DEVICE_PARAM(Channel, "Channel number");
/* PreambleType[] is the preamble length used for transmit.
0: indicate allows long preamble type
1: indicate allows short preamble type
*/
#define PREAMBLE_TYPE_DEF 1
DEVICE_PARAM(PreambleType, "Preamble Type");
#define RTS_THRESH_MIN 512
#define RTS_THRESH_MAX 2347
#define RTS_THRESH_DEF 2347
DEVICE_PARAM(RTSThreshold, "RTS threshold");
#define FRAG_THRESH_MIN 256
#define FRAG_THRESH_MAX 2346
#define FRAG_THRESH_DEF 2346
DEVICE_PARAM(FragThreshold, "Fragmentation threshold");
#define DATA_RATE_MIN 0
#define DATA_RATE_MAX 13
#define DATA_RATE_DEF 13
/* datarate[] index
0: indicate 1 Mbps 0x02
1: indicate 2 Mbps 0x04
2: indicate 5.5 Mbps 0x0B
3: indicate 11 Mbps 0x16
4: indicate 6 Mbps 0x0c
5: indicate 9 Mbps 0x12
6: indicate 12 Mbps 0x18
7: indicate 18 Mbps 0x24
8: indicate 24 Mbps 0x30
9: indicate 36 Mbps 0x48
10: indicate 48 Mbps 0x60
11: indicate 54 Mbps 0x6c
12: indicate 72 Mbps 0x90
13: indicate auto rate
*/
DEVICE_PARAM(ConnectionRate, "Connection data rate");
#define OP_MODE_DEF 0
DEVICE_PARAM(OPMode, "Infrastructure, adhoc, AP mode");
/* OpMode[] is used for transmit.
0: indicate infrastructure mode used
1: indicate adhoc mode used
2: indicate AP mode used
*/
/* PSMode[]
0: indicate disable power saving mode
1: indicate enable power saving mode
*/
#define PS_MODE_DEF 0
DEVICE_PARAM(PSMode, "Power saving mode");
#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8
DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
#define LONG_RETRY_MIN 0
#define LONG_RETRY_MAX 15
#define LONG_RETRY_DEF 4
DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
/* BasebandType[] baseband type selected
0: indicate 802.11a type
1: indicate 802.11b type
2: indicate 802.11g type
*/
#define BBP_TYPE_MIN 0
#define BBP_TYPE_MAX 2
#define BBP_TYPE_DEF 2
DEVICE_PARAM(BasebandType, "baseband type");
/* 80211hEnable[]
0: indicate disable 802.11h
1: indicate enable 802.11h
*/
#define X80211h_MODE_DEF 0
DEVICE_PARAM(b80211hEnable, "802.11h mode");
/* bDiversityANTEnable[]
0: indicate disable antenna diversity
1: indicate enable antenna diversity
*/
#define DIVERSITY_ANT_DEF 0
DEVICE_PARAM(bDiversityANTEnable, "ANT diversity mode");
//
// Static vars definitions
//
static int device_nics = 0;
static PSDevice pDevice_Infos = NULL;
static struct net_device *root_device_dev = NULL;
static CHIP_INFO chip_info_table[] = {
{ VT3253, "VIA Networking Solomon-A/B/G Wireless LAN Adapter ",
256, 1, DEVICE_FLAGS_IP_ALIGN|DEVICE_FLAGS_TX_ALIGN },
{0, NULL}
};
DEFINE_PCI_DEVICE_TABLE(vt6655_pci_id_table) = {
{ PCI_VDEVICE(VIA, 0x3253), (kernel_ulong_t)chip_info_table},
{ 0, }
};
/*--------------------- Static Functions --------------------------*/
static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static void vt6655_init_info(struct pci_dev *pcid, PSDevice *ppDevice, PCHIP_INFO);
static void device_free_info(PSDevice pDevice);
static bool device_get_pci_info(PSDevice, struct pci_dev *pcid);
static void device_print_info(PSDevice pDevice);
static struct net_device_stats *device_get_stats(struct net_device *dev);
static void device_init_diversity_timer(PSDevice pDevice);
static int device_open(struct net_device *dev);
static int device_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t device_intr(int irq, void *dev_instance);
static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_PM
static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
static int viawget_suspend(struct pci_dev *pcid, pm_message_t state);
static int viawget_resume(struct pci_dev *pcid);
struct notifier_block device_notifier = {
.notifier_call = device_notify_reboot,
.next = NULL,
.priority = 0,
};
#endif
static void device_init_rd0_ring(PSDevice pDevice);
static void device_init_rd1_ring(PSDevice pDevice);
static void device_init_defrag_cb(PSDevice pDevice);
static void device_init_td0_ring(PSDevice pDevice);
static void device_init_td1_ring(PSDevice pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
//2008-0714<Add>by Mike Liu
static bool device_release_WPADEV(PSDevice pDevice);
static int ethtool_ioctl(struct net_device *dev, void *useraddr);
static int device_rx_srv(PSDevice pDevice, unsigned int uIdx);
static int device_tx_srv(PSDevice pDevice, unsigned int uIdx);
static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pDesc);
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType);
static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc);
static void device_free_td0_ring(PSDevice pDevice);
static void device_free_td1_ring(PSDevice pDevice);
static void device_free_rd0_ring(PSDevice pDevice);
static void device_free_rd1_ring(PSDevice pDevice);
static void device_free_rings(PSDevice pDevice);
static void device_free_frag_buf(PSDevice pDevice);
static int Config_FileGetParameter(unsigned char *string,
unsigned char *dest, unsigned char *source);
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
static char *get_chip_name(int chip_id)
{
int i;
for (i = 0; chip_info_table[i].name != NULL; i++)
if (chip_info_table[i].chip_id == chip_id)
break;
return chip_info_table[i].name;
}
static void vt6655_remove(struct pci_dev *pcid)
{
PSDevice pDevice = pci_get_drvdata(pcid);
if (pDevice == NULL)
return;
device_free_info(pDevice);
}
/*
static void
device_set_int_opt(int *opt, int val, int min, int max, int def,char* name,char* devname) {
if (val==-1)
*opt=def;
else if (val<min || val>max) {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n" ,
devname,name, min,max);
*opt=def;
} else {
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
devname, name, val);
*opt=val;
}
}
static void
device_set_bool_opt(unsigned int *opt, int val,bool def,u32 flag, char* name,char* devname) {
(*opt)&=(~flag);
if (val==-1)
*opt|=(def ? flag : 0);
else if (val<0 || val>1) {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE
"%s: the value of parameter %s is invalid, the valid range is (0-1)\n",devname,name);
*opt|=(def ? flag : 0);
} else {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: set parameter %s to %s\n",
devname,name , val ? "true" : "false");
*opt|=(val ? flag : 0);
}
}
*/
static void device_get_options(PSDevice pDevice, int index, char *devname)
{
POPTIONS pOpts = &(pDevice->sOpts);
pOpts->nRxDescs0 = RX_DESC_DEF0;
pOpts->nRxDescs1 = RX_DESC_DEF1;
pOpts->nTxDescs[0] = TX_DESC_DEF0;
pOpts->nTxDescs[1] = TX_DESC_DEF1;
pOpts->flags |= DEVICE_FLAGS_IP_ALIGN;
pOpts->int_works = INT_WORKS_DEF;
pOpts->rts_thresh = RTS_THRESH_DEF;
pOpts->frag_thresh = FRAG_THRESH_DEF;
pOpts->data_rate = DATA_RATE_DEF;
pOpts->channel_num = CHANNEL_DEF;
pOpts->flags |= DEVICE_FLAGS_PREAMBLE_TYPE;
pOpts->flags |= DEVICE_FLAGS_OP_MODE;
//pOpts->flags|=DEVICE_FLAGS_PS_MODE;
pOpts->short_retry = SHORT_RETRY_DEF;
pOpts->long_retry = LONG_RETRY_DEF;
pOpts->bbp_type = BBP_TYPE_DEF;
pOpts->flags |= DEVICE_FLAGS_80211h_MODE;
pOpts->flags |= DEVICE_FLAGS_DiversityANT;
}
static void
device_set_options(PSDevice pDevice) {
unsigned char abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
unsigned char abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
unsigned char abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
pDevice->uChannel = pDevice->sOpts.channel_num;
pDevice->wRTSThreshold = pDevice->sOpts.rts_thresh;
pDevice->wFragmentationThreshold = pDevice->sOpts.frag_thresh;
pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME;
pDevice->byShortPreamble = (pDevice->sOpts.flags & DEVICE_FLAGS_PREAMBLE_TYPE) ? 1 : 0;
pDevice->byOpMode = (pDevice->sOpts.flags & DEVICE_FLAGS_OP_MODE) ? 1 : 0;
pDevice->ePSMode = (pDevice->sOpts.flags & DEVICE_FLAGS_PS_MODE) ? 1 : 0;
pDevice->b11hEnable = (pDevice->sOpts.flags & DEVICE_FLAGS_80211h_MODE) ? 1 : 0;
pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
pDevice->uConnectionRate = pDevice->sOpts.data_rate;
if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
pDevice->byBBType = pDevice->sOpts.bbp_type;
pDevice->byPacketType = pDevice->byBBType;
//PLICE_DEBUG->
pDevice->byAutoFBCtrl = AUTO_FB_0;
//pDevice->byAutoFBCtrl = AUTO_FB_1;
//PLICE_DEBUG<-
pDevice->bUpdateBBVGA = true;
pDevice->byFOETuning = 0;
pDevice->wCTSDuration = 0;
pDevice->byPreambleType = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " uChannel= %d\n", (int)pDevice->uChannel);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byOpMode= %d\n", (int)pDevice->byOpMode);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " ePSMode= %d\n", (int)pDevice->ePSMode);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " wRTSThreshold= %d\n", (int)pDevice->wRTSThreshold);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byShortRetryLimit= %d\n", (int)pDevice->byShortRetryLimit);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byLongRetryLimit= %d\n", (int)pDevice->byLongRetryLimit);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byPreambleType= %d\n", (int)pDevice->byPreambleType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byShortPreamble= %d\n", (int)pDevice->byShortPreamble);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " uConnectionRate= %d\n", (int)pDevice->uConnectionRate);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " byBBType= %d\n", (int)pDevice->byBBType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " pDevice->b11hEnable= %d\n", (int)pDevice->b11hEnable);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " pDevice->bDiversityRegCtlON= %d\n", (int)pDevice->bDiversityRegCtlON);
}
static void s_vCompleteCurrentMeasure(PSDevice pDevice, unsigned char byResult)
{
unsigned int ii;
unsigned long dwDuration = 0;
unsigned char byRPI0 = 0;
for (ii = 1; ii < 8; ii++) {
pDevice->dwRPIs[ii] *= 255;
dwDuration |= *((unsigned short *)(pDevice->pCurrMeasureEID->sReq.abyDuration));
dwDuration <<= 10;
pDevice->dwRPIs[ii] /= dwDuration;
pDevice->abyRPIs[ii] = (unsigned char)pDevice->dwRPIs[ii];
byRPI0 += pDevice->abyRPIs[ii];
}
pDevice->abyRPIs[0] = (0xFF - byRPI0);
if (pDevice->uNumOfMeasureEIDs == 0) {
VNTWIFIbMeasureReport(pDevice->pMgmt,
true,
pDevice->pCurrMeasureEID,
byResult,
pDevice->byBasicMap,
pDevice->byCCAFraction,
pDevice->abyRPIs
);
} else {
VNTWIFIbMeasureReport(pDevice->pMgmt,
false,
pDevice->pCurrMeasureEID,
byResult,
pDevice->byBasicMap,
pDevice->byCCAFraction,
pDevice->abyRPIs
);
CARDbStartMeasure(pDevice, pDevice->pCurrMeasureEID++, pDevice->uNumOfMeasureEIDs);
}
}
//
// Initialisation of MAC & BBP registers
//
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
{
unsigned int ii;
unsigned char byValue;
unsigned char byValue1;
unsigned char byCCKPwrdBm = 0;
unsigned char byOFDMPwrdBm = 0;
int zonetype = 0;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
MACbShutdown(pDevice->PortOffset);
BBvSoftwareReset(pDevice->PortOffset);
if ((InitType == DEVICE_INIT_COLD) ||
(InitType == DEVICE_INIT_DXPL)) {
// Do MACbSoftwareReset in MACvInitialize
MACbSoftwareReset(pDevice->PortOffset);
// force CCK
pDevice->bCCK = true;
pDevice->bAES = false;
pDevice->bProtectMode = false; //Only used in 11g type, sync with ERP IE
pDevice->bNonERPPresent = false;
pDevice->bBarkerPreambleMd = false;
pDevice->wCurrentRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_24M;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byRevId = 0; //Target to IF pin while programming to RF chip.
// init MAC
MACvInitialize(pDevice->PortOffset);
// Get Local ID
VNSvInPortB(pDevice->PortOffset + MAC_REG_LOCALID, &(pDevice->byLocalID));
spin_lock_irq(&pDevice->lock);
SROMvReadAllContents(pDevice->PortOffset, pDevice->abyEEPROM);
spin_unlock_irq(&pDevice->lock);
// Get Channel range
pDevice->byMinChannel = 1;
pDevice->byMaxChannel = CB_MAX_CHANNEL;
// Get Antenna
byValue = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
if (byValue & EEP_ANTINV)
pDevice->bTxRxAntInv = true;
else
pDevice->bTxRxAntInv = false;
byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
if (byValue == 0) // if not set default is All
byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
pDevice->ulDiversityNValue = 100*260;//100*SROMbyReadEmbedded(pDevice->PortOffset, 0x51);
pDevice->ulDiversityMValue = 100*16;//SROMbyReadEmbedded(pDevice->PortOffset, 0x52);
pDevice->byTMax = 1;//SROMbyReadEmbedded(pDevice->PortOffset, 0x53);
pDevice->byTMax2 = 4;//SROMbyReadEmbedded(pDevice->PortOffset, 0x54);
pDevice->ulSQ3TH = 0;//(unsigned long) SROMbyReadEmbedded(pDevice->PortOffset, 0x55);
pDevice->byTMax3 = 64;//SROMbyReadEmbedded(pDevice->PortOffset, 0x56);
if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
pDevice->byAntennaCount = 2;
pDevice->byTxAntennaMode = ANT_B;
pDevice->dwTxAntennaSel = 1;
pDevice->dwRxAntennaSel = 1;
if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
// chester for antenna
byValue1 = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
if ((byValue1 & 0x08) == 0)
pDevice->bDiversityEnable = false;//SROMbyReadEmbedded(pDevice->PortOffset, 0x50);
else
pDevice->bDiversityEnable = true;
} else {
pDevice->bDiversityEnable = false;
pDevice->byAntennaCount = 1;
pDevice->dwTxAntennaSel = 0;
pDevice->dwRxAntennaSel = 0;
if (byValue & EEP_ANTENNA_AUX) {
pDevice->byTxAntennaMode = ANT_A;
if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_B;
else
pDevice->byRxAntennaMode = ANT_A;
} else {
pDevice->byTxAntennaMode = ANT_B;
if (pDevice->bTxRxAntInv == true)
pDevice->byRxAntennaMode = ANT_A;
else
pDevice->byRxAntennaMode = ANT_B;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bDiversityEnable=[%d],NValue=[%d],MValue=[%d],TMax=[%d],TMax2=[%d]\n",
pDevice->bDiversityEnable, (int)pDevice->ulDiversityNValue, (int)pDevice->ulDiversityMValue, pDevice->byTMax, pDevice->byTMax2);
//#ifdef ZoneType_DefaultSetting
//2008-8-4 <add> by chester
//zonetype initial
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
zonetype = Config_FileOperation(pDevice, false, NULL);
if (zonetype >= 0) { //read zonetype file ok!
if ((zonetype == 0) &&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x00)) { //for USA
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Init Zone Type :USA\n");
} else if ((zonetype == 1) &&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x01)) { //for Japan
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
} else if ((zonetype == 2) &&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] != 0x02)) { //for Europe
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Init Zone Type :Europe\n");
}
else {
if (zonetype != pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
printk("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n", zonetype, pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
else
printk("Read Zonetype file success,use default zonetype setting[%02x]\n", zonetype);
}
} else
printk("Read Zonetype file fail,use default zonetype setting[%02x]\n", SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ZONETYPE));
// Get RFType
pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
if ((pDevice->byRFType & RF_EMU) != 0) {
// force change RevID for VT3253 emu
pDevice->byRevId = 0x80;
}
pDevice->byRFType &= RF_MASK;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRFType = %x\n", pDevice->byRFType);
if (pDevice->bZoneRegExist == false) {
pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byZoneType = %x\n", pDevice->byZoneType);
//Init RF module
RFbInit(pDevice);
//Get Desire Power Value
pDevice->byCurPwr = 0xFF;
pDevice->byCCKPwr = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_CCK);
pDevice->byOFDMPwrG = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_OFDMG);
//byCCKPwrdBm = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_CCK_PWR_dBm);
//byOFDMPwrdBm = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_OFDM_PWR_dBm);
// Load power Table
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
pDevice->abyCCKPwrTbl[ii + 1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
if (pDevice->abyCCKPwrTbl[ii + 1] == 0) {
pDevice->abyCCKPwrTbl[ii+1] = pDevice->byCCKPwr;
}
pDevice->abyOFDMPwrTbl[ii + 1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
if (pDevice->abyOFDMPwrTbl[ii + 1] == 0) {
pDevice->abyOFDMPwrTbl[ii + 1] = pDevice->byOFDMPwrG;
}
pDevice->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
pDevice->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
}
//2008-8-4 <add> by chester
//recover channels 12, 13, 14 for Europe using the channel 11 settings
if (((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe)) &&
(pDevice->byOriginalZonetype == ZoneType_USA)) {
for (ii = 11; ii < 14; ii++) {
pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
}
}
// Load OFDM A Power Table
for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) { //RobertYu:20041224, bug using CB_MAX_CHANNEL
pDevice->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
pDevice->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
init_channel_table((void *)pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortB(pDevice->PortOffset + MAC_REG_MSRCTL + 1, (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
MACvSelectPage0(pDevice->PortOffset);
}
// use relative tx timeout and 802.11i D4
MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
// set performance parameter by registry
MACvSetShortRetryLimit(pDevice->PortOffset, pDevice->byShortRetryLimit);
MACvSetLongRetryLimit(pDevice->PortOffset, pDevice->byLongRetryLimit);
// reset TSF counter
VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
// enable TSF counter
VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
// initialize BBP registers
BBbVT3253Init(pDevice);
if (pDevice->bUpdateBBVGA) {
pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
pDevice->byBBVGANew = pDevice->byBBVGACurrent;
BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
}
BBvSetRxAntennaMode(pDevice->PortOffset, pDevice->byRxAntennaMode);
BBvSetTxAntennaMode(pDevice->PortOffset, pDevice->byTxAntennaMode);
pDevice->byCurrentCh = 0;
//pDevice->NetworkType = Ndis802_11Automode;
// Set BB and packet type at the same time.
// Set Short Slot Time, xIFS, and RSPINF.
if (pDevice->uConnectionRate == RATE_AUTO) {
pDevice->wCurrentRate = RATE_54M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
// default G Mode
VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_11G);
VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_AUTO);
pDevice->bRadioOff = false;
pDevice->byRadioCtl = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RADIOCTL);
pDevice->bHWRadioOff = false;
if (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) {
// Get GPIO
MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
//2008-4-14 <add> by chester for led issue
#ifdef FOR_LED_ON_NOTEBOOK
if (pDevice->byGPIO & GPIO0_DATA) { pDevice->bHWRadioOff = true; }
if (!(pDevice->byGPIO & GPIO0_DATA)) { pDevice->bHWRadioOff = false; }
}
if (pDevice->bRadioControlOff == true) {
CARDbRadioPowerOff(pDevice);
} else CARDbRadioPowerOn(pDevice);
#else
if (((pDevice->byGPIO & GPIO0_DATA) && !(pDevice->byRadioCtl & EEP_RADIOCTL_INV)) ||
(!(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV))) {
pDevice->bHWRadioOff = true;
}
}
if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
CARDbRadioPowerOff(pDevice);
}
#endif
}
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
// get Permanent network address
SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Network address = %pM\n",
pDevice->abyCurrentNetAddr);
// reset Rx pointer
CARDvSafeResetRx(pDevice);
// reset Tx pointer
CARDvSafeResetTx(pDevice);
if (pDevice->byLocalID <= REV_ID_VT3253_A1) {
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_WPAERR);
}
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
// Turn On Rx DMA
MACvReceive0(pDevice->PortOffset);
MACvReceive1(pDevice->PortOffset);
// start the adapter
MACvStart(pDevice->PortOffset);
netif_stop_queue(pDevice->dev);
}
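/* Set up the three software timers used by the antenna-diversity logic
(SQ3 Tmax1/2/3); each is initialised to expire one second (HZ) after being armed. */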
static void device_init_diversity_timer(PSDevice pDevice) {
init_timer(&pDevice->TimerSQ3Tmax1);
pDevice->TimerSQ3Tmax1.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax2);
pDevice->TimerSQ3Tmax2.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax3);
pDevice->TimerSQ3Tmax3.data = (unsigned long) pDevice;
pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerState1CallBack;
pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
return;
}
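/* Send a VIAWGET_DEVICECLOSE_MSG to the wpa_supplicant interface and poll
(up to ~1 s) for the WPA pseudo-device to finish going down. */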
static bool device_release_WPADEV(PSDevice pDevice)
{
viawget_wpa_header *wpahdr;
int ii = 0;
// wait_queue_head_t Set_wait;
//send device close to wpa_supplicant layer
if (pDevice->bWPADEVUp == true) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_DEVICECLOSE_MSG;
wpahdr->resp_ie_len = 0;
wpahdr->req_ie_len = 0;
skb_put(pDevice->skb, sizeof(viawget_wpa_header));
pDevice->skb->dev = pDevice->wpadev;
skb_reset_mac_header(pDevice->skb);
pDevice->skb->pkt_type = PACKET_HOST;
pDevice->skb->protocol = htons(ETH_P_802_2);
memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
netif_rx(pDevice->skb);
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
//wait release WPADEV
// init_waitqueue_head(&Set_wait);
// wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait
while ((pDevice->bWPADEVUp == true)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 20); //wait 50ms
ii++;
if (ii > 20)
break;
}
}
return true;
}
static const struct net_device_ops device_netdev_ops = {
.ndo_open = device_open,
.ndo_stop = device_close,
.ndo_do_ioctl = device_ioctl,
.ndo_get_stats = device_get_stats,
.ndo_start_xmit = device_xmit,
.ndo_set_rx_mode = device_set_multi,
};
static int
vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
static bool bFirst = true;
struct net_device *dev = NULL;
PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
PSDevice pDevice;
int rc;
if (device_nics++ >= MAX_UINTS) {
printk(KERN_NOTICE DEVICE_NAME ": already found %d NICs\n", device_nics);
return -ENODEV;
}
dev = alloc_etherdev(sizeof(DEVICE_INFO));
if (dev == NULL) {
printk(KERN_ERR DEVICE_NAME ": allocate net device failed\n");
return -ENOMEM;
}
pDevice = (PSDevice) netdev_priv(dev);
// Chain it all together
// SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pcid->dev);
if (bFirst) {
printk(KERN_NOTICE "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
printk(KERN_NOTICE "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
bFirst = false;
}
vt6655_init_info(pcid, &pDevice, pChip_info);
pDevice->dev = dev;
pDevice->next_module = root_device_dev;
root_device_dev = dev;
if (pci_enable_device(pcid)) {
device_free_info(pDevice);
return -ENODEV;
}
dev->irq = pcid->irq;
#ifdef DEBUG
printk("Before get pci_info memaddr is %x\n", pDevice->memaddr);
#endif
if (device_get_pci_info(pDevice, pcid) == false) {
printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device.\n");
device_free_info(pDevice);
return -ENODEV;
}
#if 1
#ifdef DEBUG
//pci_read_config_byte(pcid, PCI_BASE_ADDRESS_0, &pDevice->byRevId);
printk("after get pci_info memaddr is %x, io addr is %x,io_size is %d\n", pDevice->memaddr, pDevice->ioaddr, pDevice->io_size);
{
int i;
u32 bar, len;
u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
PCI_BASE_ADDRESS_2,
PCI_BASE_ADDRESS_3,
PCI_BASE_ADDRESS_4,
PCI_BASE_ADDRESS_5,
0};
for (i = 0; address[i]; i++) {
//pci_write_config_dword(pcid,address[i], 0xFFFFFFFF);
pci_read_config_dword(pcid, address[i], &bar);
printk("bar %d is %x\n", i, bar);
if (!bar) {
printk("bar %d not implemented\n", i);
continue;
}
if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
/* This is IO */
len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
len = len & ~(len - 1);
printk("IO space: len in IO %x, BAR %d\n", len, i);
} else {
len = bar & 0xFFFFFFF0;
len = ~len + 1;
printk("len in MEM %x, BAR %d\n", len, i);
}
}
}
#endif
#endif
#ifdef DEBUG
//return 0;
#endif
pDevice->PortOffset = (unsigned long)ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
//pDevice->PortOffset = (unsigned long)ioremap(pDevice->ioaddr & PCI_BASE_ADDRESS_IO_MASK, pDevice->io_size);
if (pDevice->PortOffset == 0) {
printk(KERN_ERR DEVICE_NAME ": Failed to remap I/O memory\n");
device_free_info(pDevice);
return -ENODEV;
}
rc = pci_request_regions(pcid, DEVICE_NAME);
if (rc) {
printk(KERN_ERR DEVICE_NAME ": Failed to request PCI regions\n");
device_free_info(pDevice);
return -ENODEV;
}
dev->base_addr = pDevice->ioaddr;
#ifdef PLICE_DEBUG
unsigned char value;
VNSvInPortB(pDevice->PortOffset+0x4F, &value);
printk("Before write: value is %x\n", value);
//VNSvInPortB(pDevice->PortOffset+0x3F, 0x00);
VNSvOutPortB(pDevice->PortOffset, value);
VNSvInPortB(pDevice->PortOffset+0x4F, &value);
printk("After write: value is %x\n", value);
#endif
#ifdef IO_MAP
pDevice->PortOffset = pDevice->ioaddr;
#endif
// do reset
if (!MACbSoftwareReset(pDevice->PortOffset)) {
printk(KERN_ERR DEVICE_NAME ": Failed to access MAC hardware..\n");
device_free_info(pDevice);
return -ENODEV;
}
// initial to reload eeprom
MACvInitialize(pDevice->PortOffset);
MACvReadEtherAddress(pDevice->PortOffset, dev->dev_addr);
device_get_options(pDevice, device_nics-1, dev->name);
device_set_options(pDevice);
//Mask out the options cannot be set to the chip
pDevice->sOpts.flags &= pChip_info->flags;
//Enable the chip specified capabilities
pDevice->flags = pDevice->sOpts.flags | (pChip_info->flags & 0xFF000000UL);
pDevice->tx_80211 = device_dma0_tx_80211;
pDevice->sMgmtObj.pAdapter = (void *)pDevice;
pDevice->pMgmt = &(pDevice->sMgmtObj);
dev->irq = pcid->irq;
dev->netdev_ops = &device_netdev_ops;
dev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def;
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
device_free_info(pDevice);
return -ENODEV;
}
device_print_info(pDevice);
pci_set_drvdata(pcid, pDevice);
return 0;
}
static void device_print_info(PSDevice pDevice)
{
struct net_device *dev = pDevice->dev;
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: %s\n", dev->name, get_chip_name(pDevice->chip_id));
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: MAC=%pM", dev->name, dev->dev_addr);
#ifdef IO_MAP
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO " IO=0x%lx ", (unsigned long)pDevice->ioaddr);
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO " IRQ=%d \n", pDevice->dev->irq);
#else
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO " IO=0x%lx Mem=0x%lx ",
(unsigned long)pDevice->ioaddr, (unsigned long)pDevice->PortOffset);
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO " IRQ=%d \n", pDevice->dev->irq);
#endif
}
static void vt6655_init_info(struct pci_dev *pcid, PSDevice *ppDevice,
PCHIP_INFO pChip_info) {
PSDevice p;
memset(*ppDevice, 0, sizeof(DEVICE_INFO));
if (pDevice_Infos == NULL) {
pDevice_Infos = *ppDevice;
} else {
for (p = pDevice_Infos; p->next != NULL; p = p->next)
; /* walk to the tail of the device list */
p->next = *ppDevice;
(*ppDevice)->prev = p;
}
(*ppDevice)->pcid = pcid;
(*ppDevice)->chip_id = pChip_info->chip_id;
(*ppDevice)->io_size = pChip_info->io_size;
(*ppDevice)->nTxQueues = pChip_info->nTxQueue;
(*ppDevice)->multicast_limit = 32;
spin_lock_init(&((*ppDevice)->lock));
}
static bool device_get_pci_info(PSDevice pDevice, struct pci_dev *pcid) {
u16 pci_cmd;
u8 b;
unsigned int cis_addr;
#ifdef PLICE_DEBUG
unsigned char pci_config[256];
unsigned char value = 0x00;
int ii, j;
u16 max_lat = 0x0000;
memset(pci_config, 0x00, 256);
#endif
pci_read_config_byte(pcid, PCI_REVISION_ID, &pDevice->byRevId);
pci_read_config_word(pcid, PCI_SUBSYSTEM_ID, &pDevice->SubSystemID);
pci_read_config_word(pcid, PCI_SUBSYSTEM_VENDOR_ID, &pDevice->SubVendorID);
pci_read_config_word(pcid, PCI_COMMAND, (u16 *)&(pci_cmd));
pci_set_master(pcid);
pDevice->memaddr = pci_resource_start(pcid, 0);
pDevice->ioaddr = pci_resource_start(pcid, 1);
#ifdef DEBUG
// pDevice->ioaddr = pci_resource_start(pcid, 0);
// pDevice->memaddr = pci_resource_start(pcid,1);
#endif
cis_addr = pci_resource_start(pcid, 2);
pDevice->pcid = pcid;
pci_read_config_byte(pcid, PCI_COMMAND, &b);
pci_write_config_byte(pcid, PCI_COMMAND, (b|PCI_COMMAND_MASTER));
#ifdef PLICE_DEBUG
//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
//for (ii=0;ii<0xFF;ii++)
//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
//max_lat = 0x20;
//pci_write_config_word(pcid,PCI_MAX_LAT,max_lat);
//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
for (ii = 0; ii < 0xFF; ii++) {
pci_read_config_byte(pcid, ii, &value);
pci_config[ii] = value;
}
for (ii = 0, j = 1; ii < 0x100; ii++, j++) {
if (j % 16 == 0) {
printk("%x:", pci_config[ii]);
printk("\n");
} else {
printk("%x:", pci_config[ii]);
}
}
#endif
return true;
}
static void device_free_info(PSDevice pDevice) {
PSDevice ptr;
struct net_device *dev = pDevice->dev;
ASSERT(pDevice);
//2008-0714-01<Add>by chester
device_release_WPADEV(pDevice);
//2008-07-21-01<Add>by MikeLiu
//unregister wpadev
if (wpa_set_wpadev(pDevice, 0) != 0)
printk("unregister wpadev fail?\n");
if (pDevice_Infos == NULL)
return;
for (ptr = pDevice_Infos; ptr && (ptr != pDevice); ptr = ptr->next)
; /* find this device in the global list */
if (ptr == pDevice) {
if (ptr == pDevice_Infos)
pDevice_Infos = ptr->next;
else
ptr->prev->next = ptr->next;
} else {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "info struct not found\n");
return;
}
#ifdef HOSTAP
if (dev)
vt6655_hostap_set_hostapd(pDevice, 0, 0);
#endif
if (dev)
unregister_netdev(dev);
if (pDevice->PortOffset)
iounmap((void *)pDevice->PortOffset);
if (pDevice->pcid)
pci_release_regions(pDevice->pcid);
if (dev)
free_netdev(dev);
if (pDevice->pcid) {
pci_set_drvdata(pDevice->pcid, NULL);
}
}
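/* Allocate one coherent DMA pool holding the RD0/RD1 receive rings and the
TD0/TD1 transmit rings, plus a second pool for the per-descriptor TX buffers,
the beacon buffer and a scratch buffer, then carve both pools into the
individual ring and buffer pointers. */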
static bool device_init_rings(PSDevice pDevice) {
void *vir_pool;
/*allocate all RD/TD rings a single pool*/
vir_pool = pci_alloc_consistent(pDevice->pcid,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
&pDevice->pool_dma);
if (vir_pool == NULL) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s : allocate desc dma memory failed\n", pDevice->dev->name);
return false;
}
memset(vir_pool, 0,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc)
);
pDevice->aRD0Ring = vir_pool;
pDevice->aRD1Ring = vir_pool +
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);
pDevice->rd0_pool_dma = pDevice->pool_dma;
pDevice->rd1_pool_dma = pDevice->rd0_pool_dma +
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);
pDevice->tx0_bufs = pci_alloc_consistent(pDevice->pcid,
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE,
&pDevice->tx_bufs_dma0);
if (pDevice->tx0_bufs == NULL) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: allocate buf dma memory failed\n", pDevice->dev->name);
pci_free_consistent(pDevice->pcid,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
vir_pool, pDevice->pool_dma
);
return false;
}
memset(pDevice->tx0_bufs, 0,
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE
);
pDevice->td0_pool_dma = pDevice->rd1_pool_dma +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
pDevice->td1_pool_dma = pDevice->td0_pool_dma +
pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
// vir_pool: pvoid type
pDevice->apTD0Rings = vir_pool
+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
pDevice->apTD1Rings = vir_pool
+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc)
+ pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
pDevice->tx1_bufs = pDevice->tx0_bufs +
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
pDevice->tx_beacon_bufs = pDevice->tx1_bufs +
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
pDevice->pbyTmpBuff = pDevice->tx_beacon_bufs +
CB_BEACON_BUF_SIZE;
pDevice->tx_bufs_dma1 = pDevice->tx_bufs_dma0 +
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
pDevice->tx_beacon_dma = pDevice->tx_bufs_dma1 +
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
return true;
}
static void device_free_rings(PSDevice pDevice) {
pci_free_consistent(pDevice->pcid,
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc)
,
pDevice->aRD0Ring, pDevice->pool_dma
);
if (pDevice->tx0_bufs)
pci_free_consistent(pDevice->pcid,
pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE,
pDevice->tx0_bufs, pDevice->tx_bufs_dma0
);
}
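/* Link the RD0 descriptors into a circular ring and attach a freshly
allocated receive skb to each entry; device_init_rd1_ring() below does the
same for the second receive ring. */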
static void device_init_rd0_ring(PSDevice pDevice) {
int i;
dma_addr_t curr = pDevice->rd0_pool_dma;
PSRxDesc pDesc;
/* Init the RD0 ring entries */
for (i = 0; i < pDevice->sOpts.nRxDescs0; i ++, curr += sizeof(SRxDesc)) {
pDesc = &(pDevice->aRD0Ring[i]);
pDesc->pRDInfo = alloc_rd_info();
ASSERT(pDesc->pRDInfo);
if (!device_alloc_rx_buf(pDevice, pDesc)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc rx bufs\n",
pDevice->dev->name);
}
pDesc->next = &(pDevice->aRD0Ring[(i+1) % pDevice->sOpts.nRxDescs0]);
pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
}
if (i > 0)
pDevice->aRD0Ring[i-1].next_desc = cpu_to_le32(pDevice->rd0_pool_dma);
pDevice->pCurrRD[0] = &(pDevice->aRD0Ring[0]);
}
static void device_init_rd1_ring(PSDevice pDevice) {
int i;
dma_addr_t curr = pDevice->rd1_pool_dma;
PSRxDesc pDesc;
/* Init the RD1 ring entries */
for (i = 0; i < pDevice->sOpts.nRxDescs1; i ++, curr += sizeof(SRxDesc)) {
pDesc = &(pDevice->aRD1Ring[i]);
pDesc->pRDInfo = alloc_rd_info();
ASSERT(pDesc->pRDInfo);
if (!device_alloc_rx_buf(pDevice, pDesc)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc rx bufs\n",
pDevice->dev->name);
}
pDesc->next = &(pDevice->aRD1Ring[(i+1) % pDevice->sOpts.nRxDescs1]);
pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
}
if (i > 0)
pDevice->aRD1Ring[i-1].next_desc = cpu_to_le32(pDevice->rd1_pool_dma);
pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
}
static void device_init_defrag_cb(PSDevice pDevice) {
int i;
PSDeFragControlBlock pDeF;
/* Init the fragment ctl entries */
for (i = 0; i < CB_MAX_RX_FRAG; i++) {
pDeF = &(pDevice->sRxDFCB[i]);
if (!device_alloc_frag_buf(pDevice, pDeF)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not alloc frag bufs\n",
pDevice->dev->name);
}
}
pDevice->cbDFCB = CB_MAX_RX_FRAG;
pDevice->cbFreeDFCB = pDevice->cbDFCB;
}
static void device_free_rd0_ring(PSDevice pDevice) {
int i;
for (i = 0; i < pDevice->sOpts.nRxDescs0; i++) {
PSRxDesc pDesc = &(pDevice->aRD0Ring[i]);
PDEVICE_RD_INFO pRDInfo = pDesc->pRDInfo;
pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(pRDInfo->skb);
kfree((void *)pDesc->pRDInfo);
}
}
static void device_free_rd1_ring(PSDevice pDevice) {
int i;
for (i = 0; i < pDevice->sOpts.nRxDescs1; i++) {
PSRxDesc pDesc = &(pDevice->aRD1Ring[i]);
PDEVICE_RD_INFO pRDInfo = pDesc->pRDInfo;
pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(pRDInfo->skb);
kfree((void *)pDesc->pRDInfo);
}
}
static void device_free_frag_buf(PSDevice pDevice) {
PSDeFragControlBlock pDeF;
int i;
for (i = 0; i < CB_MAX_RX_FRAG; i++) {
pDeF = &(pDevice->sRxDFCB[i]);
if (pDeF->skb)
dev_kfree_skb(pDeF->skb);
}
}
static void device_init_td0_ring(PSDevice pDevice) {
int i;
dma_addr_t curr;
PSTxDesc pDesc;
curr = pDevice->td0_pool_dma;
for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++, curr += sizeof(STxDesc)) {
pDesc = &(pDevice->apTD0Rings[i]);
pDesc->pTDInfo = alloc_td_info();
ASSERT(pDesc->pTDInfo);
if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
pDesc->pTDInfo->buf = pDevice->tx0_bufs + (i)*PKT_BUF_SZ;
pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma0 + (i)*PKT_BUF_SZ;
}
pDesc->next = &(pDevice->apTD0Rings[(i+1) % pDevice->sOpts.nTxDescs[0]]);
pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
}
if (i > 0)
pDevice->apTD0Rings[i-1].next_desc = cpu_to_le32(pDevice->td0_pool_dma);
pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
}
static void device_init_td1_ring(PSDevice pDevice) {
int i;
dma_addr_t curr;
PSTxDesc pDesc;
/* Init the TD ring entries */
curr = pDevice->td1_pool_dma;
for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++, curr += sizeof(STxDesc)) {
pDesc = &(pDevice->apTD1Rings[i]);
pDesc->pTDInfo = alloc_td_info();
ASSERT(pDesc->pTDInfo);
if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
pDesc->pTDInfo->buf = pDevice->tx1_bufs + (i) * PKT_BUF_SZ;
pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma1 + (i) * PKT_BUF_SZ;
}
pDesc->next = &(pDevice->apTD1Rings[(i + 1) % pDevice->sOpts.nTxDescs[1]]);
pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
}
if (i > 0)
pDevice->apTD1Rings[i-1].next_desc = cpu_to_le32(pDevice->td1_pool_dma);
pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &(pDevice->apTD1Rings[0]);
}
static void device_free_td0_ring(PSDevice pDevice) {
int i;
for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++) {
PSTxDesc pDesc = &(pDevice->apTD0Rings[i]);
PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma,
pTDInfo->skb->len, PCI_DMA_TODEVICE);
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
kfree((void *)pDesc->pTDInfo);
}
}
static void device_free_td1_ring(PSDevice pDevice) {
int i;
for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++) {
PSTxDesc pDesc = &(pDevice->apTD1Rings[i]);
PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma,
pTDInfo->skb->len, PCI_DMA_TODEVICE);
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
kfree((void *)pDesc->pTDInfo);
}
}
/*-----------------------------------------------------------------*/
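/* Service a bounded batch of completed receive descriptors on ring uIdx:
hand each frame to device_receive_frame(), re-arm the descriptor with a new
rx buffer and return ownership to the NIC. Returns the number processed. */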
static int device_rx_srv(PSDevice pDevice, unsigned int uIdx) {
PSRxDesc pRD;
int works = 0;
for (pRD = pDevice->pCurrRD[uIdx];
pRD->m_rd0RD0.f1Owner == OWNED_BY_HOST;
pRD = pRD->next) {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->pCurrRD = %x, works = %d\n", pRD, works);
if (works++ > 15)
break;
if (device_receive_frame(pDevice, pRD)) {
if (!device_alloc_rx_buf(pDevice, pRD)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: can not allocate rx buf\n", pDevice->dev->name);
break;
}
}
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
pDevice->dev->last_rx = jiffies;
}
pDevice->pCurrRD[uIdx] = pRD;
return works;
}
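/* Allocate a receive skb, map it for DMA and reinitialise the descriptor so
the NIC owns it again. */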
static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
PDEVICE_RD_INFO pRDInfo = pRD->pRDInfo;
pRDInfo->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pRDInfo->skb == NULL)
return false;
ASSERT(pRDInfo->skb);
pRDInfo->skb->dev = pDevice->dev;
pRDInfo->skb_dma = pci_map_single(pDevice->pcid, skb_tail_pointer(pRDInfo->skb),
pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
*((unsigned int *)&(pRD->m_rd0RD0)) = 0; /* FIX cast */
pRD->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
pRD->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
pRD->buff_addr = cpu_to_le32(pRDInfo->skb_dma);
return true;
}
bool device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDeF->skb == NULL)
return false;
ASSERT(pDeF->skb);
pDeF->skb->dev = pDevice->dev;
return true;
}
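/* Reclaim completed transmit descriptors on queue uIdx: update statistics
from TSR0/TSR1, push hostapd frames back up the stack, re-queue failed frames
for power-saving stations in AP mode, free the skb/DMA mapping and wake the
net queue once AC0DMA has room again. */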
static int device_tx_srv(PSDevice pDevice, unsigned int uIdx) {
PSTxDesc pTD;
bool bFull = false;
int works = 0;
unsigned char byTsr0;
unsigned char byTsr1;
unsigned int uFrameSize, uFIFOHeaderSize;
PSTxBufHead pTxBufHead;
struct net_device_stats *pStats = &pDevice->stats;
struct sk_buff *skb;
unsigned int uNodeIndex;
PSMgmtObject pMgmt = pDevice->pMgmt;
for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
break;
if (works++ > 15)
break;
byTsr0 = pTD->m_td0TD0.byTSR0;
byTsr1 = pTD->m_td0TD0.byTSR1;
//Only the status of first TD in the chain is correct
if (pTD->m_td1TD1.byTCR & TCR_STP) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
uFIFOHeaderSize = pTD->pTDInfo->dwHeaderLength;
uFrameSize = pTD->pTDInfo->dwReqCount - uFIFOHeaderSize;
pTxBufHead = (PSTxBufHead) (pTD->pTDInfo->buf);
// Update the statistics based on the Transmit status
// now, we DONT check TSR0_CDH
STAvUpdateTDStatCounter(&pDevice->scStatistic,
byTsr0, byTsr1,
(unsigned char *)(pTD->pTDInfo->buf + uFIFOHeaderSize),
uFrameSize, uIdx);
BSSvUpdateNodeTxCounter(pDevice,
byTsr0, byTsr1,
(unsigned char *)(pTD->pTDInfo->buf),
uFIFOHeaderSize
);
if (!(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
}
if ((pTxBufHead->wFragCtl & FRAGCTL_ENDFRAG) != FRAGCTL_NONFRAG) {
pDevice->s802_11Counter.TransmittedFragmentCount++;
}
pStats->tx_packets++;
pStats->tx_bytes += pTD->pTDInfo->skb->len;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Tx[%d] dropped & tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
pStats->tx_errors++;
pStats->tx_dropped++;
}
}
if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
if (pDevice->bEnableHostapd) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "tx call back netif.. \n");
skb = pTD->pTDInfo->skb;
skb->dev = pDevice->apdev;
skb_reset_mac_header(skb);
skb->pkt_type = PACKET_OTHERHOST;
//skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
}
}
if (byTsr1 & TSR1_TERR) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
// (int)uIdx, byTsr1, byTsr0);
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
(pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)) {
unsigned short wAID;
unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
skb = pTD->pTDInfo->skb;
if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
// set tx map
wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
pTD->pTDInfo->byFlags &= ~(TD_FLAGS_NETIF_SKB);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "tx_srv:tx fail re-queue sta index= %d, QueCnt= %d\n"
, (int)uNodeIndex, pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
pStats->tx_errors--;
pStats->tx_dropped--;
}
}
}
}
device_free_tx_buf(pDevice, pTD);
pDevice->iTDUsed[uIdx]--;
}
}
if (uIdx == TYPE_AC0DMA) {
// RESERV_AC0DMA reserved for relay
if (AVAIL_TD(pDevice, uIdx) < RESERV_AC0DMA) {
bFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " AC0DMA is Full = %d\n", pDevice->iTDUsed[uIdx]);
}
if (netif_queue_stopped(pDevice->dev) && (bFull == false)) {
netif_wake_queue(pDevice->dev);
}
}
pDevice->apTailTD[uIdx] = pTD;
return works;
}
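/* On a fatal hardware error stop the queue, cancel the command and
second-callback timers and shut the MAC down. */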
static void device_error(PSDevice pDevice, unsigned short status) {
if (status & ISR_FETALERR) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: Hardware fatal error.\n",
pDevice->dev->name);
netif_stop_queue(pDevice->dev);
del_timer(&pDevice->sTimerCommand);
del_timer(&(pDevice->pMgmt->sTimerSecondCallback));
pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
return;
}
}
static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc) {
PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
struct sk_buff *skb = pTDInfo->skb;
// pre-allocated buf_dma can't be unmapped.
if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) {
pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma, skb->len,
PCI_DMA_TODEVICE);
}
if ((pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0)
dev_kfree_skb_irq(skb);
pTDInfo->skb_dma = 0;
pTDInfo->skb = NULL;
pTDInfo->byFlags = 0;
}
//PLICE_DEBUG ->
void InitRxManagementQueue(PSDevice pDevice)
{
pDevice->rxManeQueue.packet_num = 0;
pDevice->rxManeQueue.head = pDevice->rxManeQueue.tail = 0;
}
//PLICE_DEBUG<-
//PLICE_DEBUG ->
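/* Kernel thread that drains queued management frames (rxManeQueue) under the
device lock and hands each one to vMgrRxManagePacket() until mlme_kill is
cleared. */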
int MlmeThread(
void *Context)
{
PSDevice pDevice = (PSDevice) Context;
PSRxMgmtPacket pRxMgmtPacket;
// int i;
//complete(&pDevice->notify);
//i = 0;
#if 1
while (1) {
//down(&pDevice->mlme_semaphore);
// pRxMgmtPacket = DeQueue(pDevice);
#if 1
spin_lock_irq(&pDevice->lock);
while (pDevice->rxManeQueue.packet_num != 0) {
pRxMgmtPacket = DeQueue(pDevice);
//pDevice;
//DequeueManageObject(pDevice->FirstRecvMngList, pDevice->LastRecvMngList);
vMgrRxManagePacket(pDevice, pDevice->pMgmt, pRxMgmtPacket);
}
spin_unlock_irq(&pDevice->lock);
if (mlme_kill == 0)
break;
//udelay(200);
#endif
schedule();
if (mlme_kill == 0)
break;
}
#endif
return 0;
}
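/* ndo_open: allocate and initialise the descriptor rings, request the shared
IRQ, start the MLME thread, program the hardware (cold init) and schedule
either AP startup or a BSSID scan. */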
static int device_open(struct net_device *dev) {
PSDevice pDevice = (PSDevice)netdev_priv(dev);
int i;
#ifdef WPA_SM_Transtatus
extern SWPAResult wpa_Result;
#endif
pDevice->rx_buf_sz = PKT_BUF_SZ;
if (!device_init_rings(pDevice)) {
return -ENOMEM;
}
//2008-5-13 <add> by chester
i = request_irq(pDevice->pcid->irq, &device_intr, IRQF_SHARED, dev->name, dev);
if (i)
return i;
#ifdef WPA_SM_Transtatus
memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
wpa_Result.proto = 0;
wpa_Result.key_mgmt = 0;
wpa_Result.eap_type = 0;
wpa_Result.authenticated = false;
pDevice->fWPA_Authened = false;
#endif
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device init rd0 ring\n");
device_init_rd0_ring(pDevice);
device_init_rd1_ring(pDevice);
device_init_defrag_cb(pDevice);
device_init_td0_ring(pDevice);
device_init_td1_ring(pDevice);
// VNTWIFIvSet11h(pDevice->pMgmt, pDevice->b11hEnable);
if (pDevice->bDiversityRegCtlON) {
device_init_diversity_timer(pDevice);
}
vMgrObjectInit(pDevice);
vMgrTimerInit(pDevice);
//PLICE_DEBUG->
#ifdef TASK_LET
tasklet_init(&pDevice->RxMngWorkItem, (void *)MngWorkItem, (unsigned long)pDevice);
#endif
#ifdef THREAD
InitRxManagementQueue(pDevice);
mlme_kill = 0;
mlme_task = kthread_run(MlmeThread, (void *)pDevice, "MLME");
if (IS_ERR(mlme_task)) {
printk("thread create fail\n");
return -1;
}
mlme_kill = 1;
#endif
//wait_for_completion(&pDevice->notify);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device_init_registers\n");
device_init_registers(pDevice, DEVICE_INIT_COLD);
MACvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
device_set_multi(pDevice->dev);
// Init for Key Management
KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
add_timer(&(pDevice->pMgmt->sTimerSecondCallback));
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
/*
pDevice->bwextstep0 = false;
pDevice->bwextstep1 = false;
pDevice->bwextstep2 = false;
pDevice->bwextstep3 = false;
*/
pDevice->bwextcount = 0;
pDevice->bWPASuppWextEnabled = false;
#endif
pDevice->byReAssocCount = 0;
pDevice->bWPADEVUp = false;
// Patch: if WEP key already set by iwconfig but device not yet open
if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
KeybSetDefaultKey(&(pDevice->sKey),
(unsigned long)(pDevice->byKeyIndex | (1 << 31)),
pDevice->uKeyLength,
NULL,
pDevice->abyKey,
KEY_CTL_WEP,
pDevice->PortOffset,
pDevice->byLocalID
);
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call MACvIntEnable\n");
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
if (pDevice->pMgmt->eConfigMode == WMAC_CONFIG_AP) {
bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
} else {
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
}
pDevice->flags |= DEVICE_FLAGS_OPENED;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success.. \n");
return 0;
}
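/* ndo_stop: disassociate if a link is up, cancel timers, shut the MAC down,
free the rings and buffers and release the IRQ. */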
static int device_close(struct net_device *dev) {
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = pDevice->pMgmt;
//PLICE_DEBUG->
#ifdef THREAD
mlme_kill = 0;
#endif
//PLICE_DEBUG<-
//2007-1121-02<Add>by EinsnLiu
if (pDevice->bLinkPass) {
bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
mdelay(30);
}
#ifdef TxInSleep
del_timer(&pDevice->sTimerTxData);
#endif
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
if (pDevice->bDiversityRegCtlON) {
del_timer(&pDevice->TimerSQ3Tmax1);
del_timer(&pDevice->TimerSQ3Tmax2);
del_timer(&pDevice->TimerSQ3Tmax3);
}
#ifdef TASK_LET
tasklet_kill(&pDevice->RxMngWorkItem);
#endif
netif_stop_queue(dev);
pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
MACbSoftwareReset(pDevice->PortOffset);
CARDbRadioPowerOff(pDevice);
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
device_free_td0_ring(pDevice);
device_free_td1_ring(pDevice);
device_free_rd0_ring(pDevice);
device_free_rd1_ring(pDevice);
device_free_frag_buf(pDevice);
device_free_rings(pDevice);
BSSvClearNodeDBTable(pDevice, 0);
free_irq(dev->irq, dev);
pDevice->flags &= (~DEVICE_FLAGS_OPENED);
//2008-0714-01<Add>by chester
device_release_WPADEV(pDevice);
//PLICE_DEBUG->
//tasklet_kill(&pDevice->RxMngWorkItem);
//PLICE_DEBUG<-
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close.. \n");
return 0;
}
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
PSDevice pDevice = netdev_priv(dev);
unsigned char *pbMPDU;
unsigned int cbMPDULen = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n");
spin_lock_irq(&pDevice->lock);
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211, td0 <=0\n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pDevice->bStopTx0Pkt == true) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
cbMPDULen = skb->len;
pbMPDU = skb->data;
vDMA0_tx_80211(pDevice, skb, pbMPDU, cbMPDULen);
spin_unlock_irq(&pDevice->lock);
return 0;
}
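/* Transmit one data frame on the TXDMA0 ring: pick rate and preamble for the
destination node, optionally apply the host WEP key, build the FIFO header and
hand the descriptor chain to the NIC via MACvTransmit0(). */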
bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeIndex) {
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
unsigned int cbFrameBodySize;
unsigned int uMACfragNum;
unsigned char byPktType;
bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
unsigned int cbHeaderSize;
unsigned int ii;
SKeyItem STempKey;
// unsigned char byKeyIndex = 0;
if (pDevice->bStopTx0Pkt == true) {
dev_kfree_skb_irq(skb);
return false;
}
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, td0 <=0\n");
return false;
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (pDevice->uAssocCount == 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, assocCount = 0\n");
return false;
}
}
pHeadTD = pDevice->apCurrTD[TYPE_TXDMA0];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if (uMACfragNum > AVAIL_TD(pDevice, TYPE_TXDMA0)) {
dev_kfree_skb_irq(skb);
return false;
}
byPktType = (unsigned char)pDevice->byPacketType;
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
}
//preamble type
if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->byPreambleType = pDevice->byShortPreamble;
} else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma0: pDevice->wCurrentRate = %d \n", pDevice->wCurrentRate);
if (pDevice->wCurrentRate <= RATE_11M) {
byPktType = PK_TYPE_11B;
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
if (pDevice->bEncryptionEnable == true)
bNeedEncryption = true;
if (pDevice->bEnableHostWEP) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_TXDMA0, pHeadTD,
&pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
// Poll Transmit the adapter
wmb();
pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
if (ii == (uMACfragNum - 1))
pLastTD = pHeadTD;
pHeadTD = pHeadTD->next;
}
// Save the information needed by the tx interrupt handler
// to complete the Send request
pLastTD->pTDInfo->skb = skb;
pLastTD->pTDInfo->byFlags = 0;
pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
pDevice->apCurrTD[TYPE_TXDMA0] = pHeadTD;
MACvTransmit0(pDevice->PortOffset);
return true;
}
//TYPE_AC0DMA data tx
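/* ndo_start_xmit: in AP mode queue frames destined to power-saving stations,
otherwise look up the transmit key (pairwise or group), choose rate and packet
type, build the FIFO header, mark the descriptors NIC-owned and kick the
AC0DMA engine; completed WPA/WPA2 EAPOL handshakes are detected for logging. */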
static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
PSDevice pDevice = netdev_priv(dev);
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
unsigned int uNodeIndex = 0;
unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
unsigned short wAID;
unsigned int uMACfragNum = 1;
unsigned int cbFrameBodySize;
unsigned char byPktType;
unsigned int cbHeaderSize;
bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
SKeyItem STempKey;
unsigned int ii;
bool bTKIP_UseGTK = false;
bool bNeedDeAuth = false;
unsigned char *pbyBSSID;
bool bNodeExist = false;
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass == false) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pDevice->bStopDataPkt) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (pDevice->uAssocCount == 0) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (is_multicast_ether_addr((unsigned char *)(skb->data))) {
uNodeIndex = 0;
bNodeExist = true;
if (pMgmt->sNodeDBTable[0].bPSEnable) {
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
pMgmt->sNodeDBTable[0].wEnQueueCnt++;
// set tx map
pMgmt->abyPSTxMap[0] |= byMask[0];
spin_unlock_irq(&pDevice->lock);
return 0;
}
} else {
if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
// set tx map
wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set:pMgmt->abyPSTxMap[%d]= %d\n",
(wAID >> 3), pMgmt->abyPSTxMap[wAID >> 3]);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->byPreambleType = pDevice->byShortPreamble;
} else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
bNodeExist = true;
}
}
if (bNodeExist == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "STA not found in node DB\n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
}
pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
if (pDevice->bEncryptionEnable == true) {
bNeedEncryption = true;
// get Transmit key
do {
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get GTK.\n");
break;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get PTK.\n");
break;
}
} else if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
pbyBSSID = pDevice->sTxEthHeader.abyDstAddr; //TO_DS = 0 and FROM_DS = 0 --> 802.11 MAC Address1
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "IBSS Search Key:\n");
for (ii = 0; ii < 6; ii++)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "%x \n", *(pbyBSSID+ii));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "\n");
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
break;
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "NOT IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else {
bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get GTK.\n");
}
} while (false);
}
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "acdma0: STA index %d\n", uNodeIndex);
if (pDevice->bEncryptionEnable == true) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
}
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_DEBUG "uMACfragNum > AVAIL_TD(TYPE_AC0DMA) = %d\n", uMACfragNum);
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pTransmitKey != NULL) {
if ((pTransmitKey->byCipherSuite == KEY_CTL_WEP) &&
(pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)) {
uMACfragNum = 1; //WEP256 doesn't support fragment
}
}
byPktType = (unsigned char)pDevice->byPacketType;
if (pDevice->bFixRate) {
#ifdef PLICE_DEBUG
printk("Fix Rate: PhyType is %d,ConnectionRate is %d\n", pDevice->eCurrentPHYType, pDevice->uConnectionRate);
#endif
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
(pDevice->uConnectionRate <= RATE_6M)) {
pDevice->wCurrentRate = RATE_6M;
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
pDevice->byACKRate = (unsigned char) pDevice->wCurrentRate;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
} else {
//auto rate
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
pDevice->wCurrentRate = RATE_1M;
pDevice->byACKRate = RATE_1M;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
} else {
pDevice->wCurrentRate = RATE_6M;
pDevice->byACKRate = RATE_6M;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
}
} else {
VNTWIFIvGetTxRate(pDevice->pMgmt,
pDevice->sTxEthHeader.abyDstAddr,
&(pDevice->wCurrentRate),
&(pDevice->byACKRate),
&(pDevice->byTopCCKBasicRate),
&(pDevice->byTopOFDMBasicRate));
}
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "acdma0: pDevice->wCurrentRate = %d \n", pDevice->wCurrentRate);
if (pDevice->wCurrentRate <= RATE_11M) {
byPktType = PK_TYPE_11B;
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
//#ifdef PLICE_DEBUG
// printk("FIX RATE:CurrentRate is %d");
//#endif
if (bNeedEncryption == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
bNeedEncryption = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Don't Find TX KEY\n");
} else {
if (bTKIP_UseGTK == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "error: KEY is GTK!!~~\n");
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
bNeedEncryption = true;
}
}
}
if (pDevice->byCntMeasure == 2) {
bNeedDeAuth = true;
pDevice->s802_11Counter.TKIPCounterMeasuresInvoked++;
}
if (pDevice->bEnableHostWEP) {
if ((uNodeIndex != 0) &&
(pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
bNeedEncryption = true;
}
}
} else {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return no tx key\n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
}
}
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
&pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
for (ii = 0; ii < uMACfragNum; ii++) {
// Poll Transmit the adapter
wmb();
pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
if (ii == uMACfragNum - 1)
pLastTD = pHeadTD;
pHeadTD = pHeadTD->next;
}
// Save the information needed by the tx interrupt handler
// to complete the Send request
pLastTD->pTDInfo->skb = skb;
pLastTD->pTDInfo->byFlags = 0;
pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
#ifdef TxInSleep
pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for send null packet
#endif
if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 1) {
netif_stop_queue(dev);
}
pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
//#ifdef PLICE_DEBUG
if (pDevice->bFixRate) {
printk("FixRate:Rate is %d,TxPower is %d\n", pDevice->wCurrentRate, pDevice->byCurPwr);
} else {
}
//#endif
{
unsigned char Protocol_Version; //802.1x Authentication
unsigned char Packet_Type; //802.1x Authentication
unsigned char Descriptor_type;
unsigned short Key_info;
bool bTxeapol_key = false;
Protocol_Version = skb->data[ETH_HLEN];
Packet_Type = skb->data[ETH_HLEN+1];
Descriptor_type = skb->data[ETH_HLEN+1+1+2];
Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
(Packet_Type == 3)) { //802.1x OR eapol-key challenge frame transfer
bTxeapol_key = true;
if ((Descriptor_type == 254) || (Descriptor_type == 2)) { //WPA or RSN
if (!(Key_info & BIT3) && //group-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
pDevice->fWPA_Authened = true;
if (Descriptor_type == 254)
printk("WPA ");
else
printk("WPA2 ");
printk("Authentication completed!!\n");
}
}
}
}
}
MACvTransmitAC0(pDevice->PortOffset);
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "acdma0:pDevice->apCurrTD= %p\n", pHeadTD);
dev->trans_start = jiffies;
spin_unlock_irq(&pDevice->lock);
return 0;
}
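/* Shared interrupt handler: read and acknowledge the ISR, update MIB and
802.11 counters, handle 802.11h measurement start/end, TBTT work (quiet
period, channel switch, BBVGA tracking, beacon timing), beacon-transmitted
events, and service the RX/TX DMA rings until int_works is exceeded. */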
static irqreturn_t device_intr(int irq, void *dev_instance) {
struct net_device *dev = dev_instance;
PSDevice pDevice = (PSDevice)netdev_priv(dev);
int max_count = 0;
unsigned long dwMIBCounter = 0;
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned char byOrgPageSel = 0;
int handled = 0;
unsigned char byData = 0;
int ii = 0;
// unsigned char byRSSI;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
if (pDevice->dwIsr == 0)
return IRQ_RETVAL(handled);
if (pDevice->dwIsr == 0xffffffff) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwIsr = 0xffff\n");
return IRQ_RETVAL(handled);
}
/*
// 2008-05-21 <mark> by Richardtai, we can't read RSSI here, because no packet bound with RSSI
if ((pDevice->dwIsr & ISR_RXDMA0) &&
(pDevice->byLocalID != REV_ID_VT3253_B0) &&
(pDevice->bBSSIDFilter == true)) {
// update RSSI
//BBbReadEmbedded(pDevice->PortOffset, 0x3E, &byRSSI);
//pDevice->uCurrRSSI = byRSSI;
}
*/
handled = 1;
MACvIntDisable(pDevice->PortOffset);
spin_lock_irq(&pDevice->lock);
//Make sure current page is 0
VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
if (byOrgPageSel == 1) {
MACvSelectPage0(pDevice->PortOffset);
} else
byOrgPageSel = 0;
MACvReadMIBCounter(pDevice->PortOffset, &dwMIBCounter);
// TBD....
// Must do this after doing rx/tx, cause ISR bit is slow
// than RD/TD write back
// update ISR counter
STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic , dwMIBCounter);
while (pDevice->dwIsr != 0) {
STAvUpdateIsrStatCounter(&pDevice->scStatistic, pDevice->dwIsr);
MACvWriteISR(pDevice->PortOffset, pDevice->dwIsr);
if (pDevice->dwIsr & ISR_FETALERR) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " ISR_FETALERR \n");
VNSvOutPortB(pDevice->PortOffset + MAC_REG_SOFTPWRCTL, 0);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
device_error(pDevice, pDevice->dwIsr);
}
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
if (pDevice->dwIsr & ISR_MEASURESTART) {
// 802.11h measure start
pDevice->byOrgChannel = pDevice->byCurrentCh;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byOrgRCR));
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, (RCR_RXALLTYPE | RCR_UNICAST | RCR_BROADCAST | RCR_MULTICAST | RCR_WPAERR));
MACvSelectPage1(pDevice->PortOffset);
VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR0, &(pDevice->dwOrgMAR0));
VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR4, &(pDevice->dwOrgMAR4));
MACvSelectPage0(pDevice->PortOffset);
//xxxx
// WCMDbFlushCommandQueue(pDevice->pMgmt, true);
if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == true) {
pDevice->bMeasureInProgress = true;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
MACvSelectPage0(pDevice->PortOffset);
pDevice->byBasicMap = 0;
pDevice->byCCAFraction = 0;
for (ii = 0; ii < 8; ii++) {
pDevice->dwRPIs[ii] = 0;
}
} else {
// can not measure because set channel fail
// WCMDbResetCommandQueue(pDevice->pMgmt);
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_INCAPABLE);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
}
}
if (pDevice->dwIsr & ISR_MEASUREEND) {
// 802.11h measure end
pDevice->bMeasureInProgress = false;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRBBSTS, &byData);
pDevice->byBasicMap |= (byData >> 4);
VNSvInPortB(pDevice->PortOffset + MAC_REG_CCAFRACTION, &pDevice->byCCAFraction);
VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRCTL, &byData);
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
MACvSelectPage0(pDevice->PortOffset);
set_channel(pDevice, pDevice->byOrgChannel);
// WCMDbResetCommandQueue(pDevice->pMgmt);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
if (byData & MSRCTL_FINISH) {
// measure success
s_vCompleteCurrentMeasure(pDevice, 0);
} else {
// can not measure because not ready before end of measure time
s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_LATE);
}
}
if (pDevice->dwIsr & ISR_QUIETSTART) {
do {
;
} while (CARDbStartQuiet(pDevice) == false);
}
}
if (pDevice->dwIsr & ISR_TBTT) {
if (pDevice->bEnableFirstQuiet == true) {
pDevice->byQuietStartCount--;
if (pDevice->byQuietStartCount == 0) {
pDevice->bEnableFirstQuiet = false;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
MACvSelectPage0(pDevice->PortOffset);
}
}
if ((pDevice->bChannelSwitch == true) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
}
}
if (pDevice->eOPMode == OP_MODE_ADHOC) {
//pDevice->bBeaconSent = false;
} else {
if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == true) && (pDevice->uCurrRSSI != 0)) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
for (ii = 0; ii < BB_VGA_LEVEL; ii++) {
if (ldBm < pDevice->ldBmThreshold[ii]) {
pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
break;
}
}
if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
pDevice->uBBVGADiffCount++;
if (pDevice->uBBVGADiffCount == 1) {
// first VGA diff gain
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
(int)ldBm, pDevice->byBBVGANew, pDevice->byBBVGACurrent, (int)pDevice->uBBVGADiffCount);
}
if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
(int)ldBm, pDevice->byBBVGANew, pDevice->byBBVGACurrent, (int)pDevice->uBBVGADiffCount);
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
}
} else {
pDevice->uBBVGADiffCount = 1;
}
}
}
pDevice->bBeaconSent = false;
if (pDevice->bEnablePSMode) {
PSbIsNextTBTTWakeUp((void *)pDevice);
}
if ((pDevice->eOPMode == OP_MODE_AP) ||
(pDevice->eOPMode == OP_MODE_ADHOC)) {
MACvOneShotTimer1MicroSec(pDevice->PortOffset,
(pMgmt->wIBSSBeaconPeriod - MAKE_BEACON_RESERVED) << 10);
}
if (pDevice->eOPMode == OP_MODE_ADHOC && pDevice->pMgmt->wCurrATIMWindow > 0) {
// todo adhoc PS mode
}
}
if (pDevice->dwIsr & ISR_BNTX) {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
pDevice->bIsBeaconBufReadySet = false;
pDevice->cbBeaconBufReadySetCnt = 0;
}
if (pDevice->eOPMode == OP_MODE_AP) {
if (pMgmt->byDTIMCount > 0) {
pMgmt->byDTIMCount--;
pMgmt->sNodeDBTable[0].bRxPSPoll = false;
} else {
if (pMgmt->byDTIMCount == 0) {
// check if multicast tx buffering
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
pMgmt->sNodeDBTable[0].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
}
}
}
pDevice->bBeaconSent = true;
if (pDevice->bChannelSwitch == true) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
//VNTWIFIbSendBeacon(pDevice->pMgmt);
CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
}
}
}
if (pDevice->dwIsr & ISR_RXDMA0) {
max_count += device_rx_srv(pDevice, TYPE_RXDMA0);
}
if (pDevice->dwIsr & ISR_RXDMA1) {
max_count += device_rx_srv(pDevice, TYPE_RXDMA1);
}
if (pDevice->dwIsr & ISR_TXDMA0) {
max_count += device_tx_srv(pDevice, TYPE_TXDMA0);
}
if (pDevice->dwIsr & ISR_AC0DMA) {
max_count += device_tx_srv(pDevice, TYPE_AC0DMA);
}
if (pDevice->dwIsr & ISR_SOFTTIMER) {
}
if (pDevice->dwIsr & ISR_SOFTTIMER1) {
if (pDevice->eOPMode == OP_MODE_AP) {
if (pDevice->bShortSlotTime)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
else
pMgmt->wCurrCapInfo &= ~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1));
}
bMgrPrepareBeaconToSend(pDevice, pMgmt);
pDevice->byCntMeasure = 0;
}
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
MACvReceive0(pDevice->PortOffset);
MACvReceive1(pDevice->PortOffset);
if (max_count > pDevice->sOpts.int_works)
break;
}
if (byOrgPageSel == 1) {
MACvSelectPage1(pDevice->PortOffset);
}
spin_unlock_irq(&pDevice->lock);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
return IRQ_RETVAL(handled);
}
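/* Bit-by-bit CRC-32 over the Ethernet polynomial; the caller uses the top
six bits of the result as the multicast hash index. */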
static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
int crc = -1;
while (--length >= 0) {
unsigned char current_octet = *data++;
int bit;
for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
crc = (crc << 1) ^
((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
}
}
return crc;
}
//2008-8-4 <add> by chester
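/* Copy the value that follows "string=" from the config-file buffer into dest. */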
static int Config_FileGetParameter(unsigned char *string,
unsigned char *dest, unsigned char *source)
{
unsigned char buf1[100];
int source_len = strlen(source);
memset(buf1, 0, 100);
strcat(buf1, string);
strcat(buf1, "=");
source += strlen(buf1);
memcpy(dest, source, source_len - strlen(buf1));
return true;
}
int Config_FileOperation(PSDevice pDevice, bool fwrite, unsigned char *Parameter)
{
unsigned char *buffer = kmalloc(1024, GFP_KERNEL);
unsigned char tmpbuffer[20];
struct file *file;
int result = 0;
if (!buffer) {
printk("allocate mem for file fail?\n");
return -1;
}
file = filp_open(CONFIG_PATH, O_RDONLY, 0);
if (IS_ERR(file)) {
kfree(buffer);
printk("Config_FileOperation:open file fail?\n");
return -1;
}
if (kernel_read(file, 0, buffer, 1024) < 0) {
printk("read file error?\n");
result = -1;
goto error1;
}
if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
printk("get parameter error?\n");
result = -1;
goto error1;
}
if (memcmp(tmpbuffer, "USA", 3) == 0) {
result = ZoneType_USA;
} else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
result = ZoneType_Japan;
} else if (memcmp(tmpbuffer, "EUROPE", 6) == 0) {
result = ZoneType_Europe;
} else {
result = -1;
printk("Unknown Zonetype[%s]?\n", tmpbuffer);
}
error1:
kfree(buffer);
fput(file);
return result;
}
static void device_set_multi(struct net_device *dev) {
PSDevice pDevice = (PSDevice)netdev_priv(dev);
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
struct netdev_hw_addr *ha;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
DBG_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
/* Unconditionally log net taps. */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
} else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
|| (dev->flags & IFF_ALLMULTI)) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, 0xffffffff);
MACvSelectPage0(pDevice->PortOffset);
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
} else {
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, mc_filter[0]);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, mc_filter[1]);
MACvSelectPage0(pDevice->PortOffset);
pDevice->byRxMode &= ~(RCR_UNICAST);
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
}
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
// In AP mode, don't enable RCR_UNICAST, since the hw only compares addr1 with the local MAC.
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
pDevice->byRxMode &= ~(RCR_UNICAST);
}
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byRxMode);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode = %x\n", pDevice->byRxMode);
}
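/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): how device_set_multi() above maps a multicast address onto the
 * 64-bit MAR0/MAR1 hash filter.  The top 6 bits of the Ethernet CRC pick
 * one of 64 filter bits; bit 5 selects the 32-bit register half, bits 0-4
 * the bit inside it.
 */
static inline void vt6655_example_mc_hash_bit(unsigned char *addr,
int *word, int *bit)
{
int bit_nr = ether_crc(ETH_ALEN, addr) >> 26; /* 0..63 */
*word = bit_nr >> 5; /* 0: MAR0, 1: MAR0 + 4 */
*bit = bit_nr & 31; /* bit position within that word */
}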
static struct net_device_stats *device_get_stats(struct net_device *dev) {
PSDevice pDevice = (PSDevice)netdev_priv(dev);
return &pDevice->stats;
}
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
PSDevice pDevice = (PSDevice)netdev_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
int rc = 0;
PSMgmtObject pMgmt = pDevice->pMgmt;
PSCmdRequest pReq;
if (pMgmt == NULL) {
rc = -EFAULT;
return rc;
}
switch (cmd) {
case SIOCGIWNAME:
rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL);
break;
case SIOCGIWNWID: //0x8b03 support
rc = -EOPNOTSUPP;
break;
// Set frequency/channel
case SIOCSIWFREQ:
rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
// Get frequency/channel
case SIOCGIWFREQ:
rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL);
break;
// Set desired network name (ESSID)
case SIOCSIWESSID:
{
char essid[IW_ESSID_MAX_SIZE+1];
if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) {
rc = -E2BIG;
break;
}
if (copy_from_user(essid, wrq->u.essid.pointer,
wrq->u.essid.length)) {
rc = -EFAULT;
break;
}
rc = iwctl_siwessid(dev, NULL,
&(wrq->u.essid), essid);
}
break;
// Get current network name (ESSID)
case SIOCGIWESSID:
{
char essid[IW_ESSID_MAX_SIZE+1];
if (wrq->u.essid.pointer)
rc = iwctl_giwessid(dev, NULL,
&(wrq->u.essid), essid);
if (copy_to_user(wrq->u.essid.pointer,
essid,
wrq->u.essid.length))
rc = -EFAULT;
}
break;
case SIOCSIWAP:
rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL);
break;
// Get current Access Point (BSSID)
case SIOCGIWAP:
rc = iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL);
break;
// Set desired station name
case SIOCSIWNICKN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWNICKN \n");
rc = -EOPNOTSUPP;
break;
// Get current station name
case SIOCGIWNICKN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWNICKN \n");
rc = -EOPNOTSUPP;
break;
// Set the desired bit-rate
case SIOCSIWRATE:
rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL);
break;
// Get the current bit-rate
case SIOCGIWRATE:
rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
break;
// Set the desired RTS threshold
case SIOCSIWRTS:
rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
break;
// Get the current RTS threshold
case SIOCGIWRTS:
rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL);
break;
// Set the desired fragmentation threshold
case SIOCSIWFRAG:
rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL);
break;
// Get the current fragmentation threshold
case SIOCGIWFRAG:
rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL);
break;
// Set mode of operation
case SIOCSIWMODE:
rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL);
break;
// Get mode of operation
case SIOCGIWMODE:
rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
break;
// Set WEP keys and mode
case SIOCSIWENCODE: {
char abyKey[WLAN_WEP232_KEYLEN];
if (wrq->u.encoding.pointer) {
if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) {
rc = -E2BIG;
break;
}
memset(abyKey, 0, WLAN_WEP232_KEYLEN);
if (copy_from_user(abyKey,
wrq->u.encoding.pointer,
wrq->u.encoding.length)) {
rc = -EFAULT;
break;
}
} else if (wrq->u.encoding.length != 0) {
rc = -EINVAL;
break;
}
rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey);
}
break;
// Get the WEP keys and mode
case SIOCGIWENCODE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
{
char abyKey[WLAN_WEP232_KEYLEN];
rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey);
if (rc != 0) break;
if (wrq->u.encoding.pointer) {
if (copy_to_user(wrq->u.encoding.pointer,
abyKey,
wrq->u.encoding.length))
rc = -EFAULT;
}
}
break;
// Get the current Tx-Power
case SIOCGIWTXPOW:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n");
rc = -EOPNOTSUPP;
break;
case SIOCSIWTXPOW:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWTXPOW \n");
rc = -EOPNOTSUPP;
break;
case SIOCSIWRETRY:
rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL);
break;
case SIOCGIWRETRY:
rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL);
break;
// Get range of parameters
case SIOCGIWRANGE:
{
struct iw_range range;
rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *)&range);
if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
rc = -EFAULT;
}
break;
case SIOCGIWPOWER:
rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL);
break;
case SIOCSIWPOWER:
rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL);
break;
case SIOCGIWSENS:
rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL);
break;
case SIOCSIWSENS:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSENS \n");
rc = -EOPNOTSUPP;
break;
case SIOCGIWAPLIST: {
char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))];
if (wrq->u.data.pointer) {
rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer);
if (rc == 0) {
if (copy_to_user(wrq->u.data.pointer,
buffer,
(wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality)))
))
rc = -EFAULT;
}
}
}
break;
#ifdef WIRELESS_SPY
// Set the spy list
case SIOCSIWSPY:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n");
rc = -EOPNOTSUPP;
break;
// Get the spy list
case SIOCGIWSPY:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSPY \n");
rc = -EOPNOTSUPP;
break;
#endif // WIRELESS_SPY
case SIOCGIWPRIV:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPRIV \n");
rc = -EOPNOTSUPP;
/*
if (wrq->u.data.pointer) {
wrq->u.data.length = sizeof(iwctl_private_args) / sizeof(iwctl_private_args[0]);
if (copy_to_user(wrq->u.data.pointer,
(u_char *) iwctl_private_args,
sizeof(iwctl_private_args)))
rc = -EFAULT;
}
*/
break;
//2008-0409-07, <Add> by Einsn Liu
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
case SIOCSIWAUTH:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH \n");
rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL);
break;
case SIOCGIWAUTH:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAUTH \n");
rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL);
break;
case SIOCSIWGENIE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWGENIE \n");
rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
break;
case SIOCGIWGENIE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWGENIE \n");
rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
break;
case SIOCSIWENCODEEXT: {
char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODEEXT \n");
if (wrq->u.encoding.pointer) {
memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN + 1);
if (wrq->u.encoding.length > (sizeof(struct iw_encode_ext) + MAX_KEY_LEN)) {
rc = -E2BIG;
break;
}
if (copy_from_user(extra, wrq->u.encoding.pointer, wrq->u.encoding.length)) {
rc = -EFAULT;
break;
}
} else if (wrq->u.encoding.length != 0) {
rc = -EINVAL;
break;
}
rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra);
}
break;
case SIOCGIWENCODEEXT:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODEEXT \n");
rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL);
break;
case SIOCSIWMLME:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMLME \n");
rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
break;
#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
//End Add -- //2008-0409-07, <Add> by Einsn Liu
case IOCTL_CMD_TEST:
if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
rc = -EFAULT;
break;
} else {
rc = 0;
}
pReq = (PSCmdRequest)rq;
pReq->wResult = MAGIC_CODE;
break;
case IOCTL_CMD_SET:
#ifdef SndEvt_ToAPI
if ((((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_EVT) &&
!(pDevice->flags & DEVICE_FLAGS_OPENED))
#else
if (!(pDevice->flags & DEVICE_FLAGS_OPENED) &&
(((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_WPA))
#endif
{
rc = -EFAULT;
break;
} else {
rc = 0;
}
if (test_and_set_bit(0, (void *)&(pMgmt->uCmdBusy))) {
return -EBUSY;
}
rc = private_ioctl(pDevice, rq);
clear_bit(0, (void *)&(pMgmt->uCmdBusy));
break;
case IOCTL_CMD_HOSTAPD:
rc = vt6655_hostap_ioctl(pDevice, &wrq->u.data);
break;
case IOCTL_CMD_WPA:
rc = wpa_ioctl(pDevice, &wrq->u.data);
break;
case SIOCETHTOOL:
return ethtool_ioctl(dev, (void *)rq->ifr_data);
// All other calls are currently unsupported
default:
rc = -EOPNOTSUPP;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Ioctl command not supported: 0x%x\n", cmd);
}
if (pDevice->bCommit) {
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n");
spin_lock_irq(&pDevice->lock);
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
if (pDevice->bWPASuppWextEnabled != true)
#endif
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
}
pDevice->bCommit = false;
}
return rc;
}
static int ethtool_ioctl(struct net_device *dev, void *useraddr)
{
u32 ethcmd;
if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
switch (ethcmd) {
case ETHTOOL_GDRVINFO: {
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
return 0;
}
}
return -EOPNOTSUPP;
}
/*------------------------------------------------------------------*/
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
static struct pci_driver device_driver = {
.name = DEVICE_NAME,
.id_table = vt6655_pci_id_table,
.probe = vt6655_probe,
.remove = vt6655_remove,
#ifdef CONFIG_PM
.suspend = viawget_suspend,
.resume = viawget_resume,
#endif
};
static int __init vt6655_init_module(void)
{
int ret;
// ret=pci_module_init(&device_driver);
//ret = pcie_port_service_register(&device_driver);
ret = pci_register_driver(&device_driver);
#ifdef CONFIG_PM
if (ret >= 0)
register_reboot_notifier(&device_notifier);
#endif
return ret;
}
static void __exit vt6655_cleanup_module(void)
{
#ifdef CONFIG_PM
unregister_reboot_notifier(&device_notifier);
#endif
pci_unregister_driver(&device_driver);
}
module_init(vt6655_init_module);
module_exit(vt6655_cleanup_module);
#ifdef CONFIG_PM
static int
device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
struct pci_dev *pdev = NULL;
switch (event) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
for_each_pci_dev(pdev) {
if (pci_dev_driver(pdev) == &device_driver) {
if (pci_get_drvdata(pdev))
viawget_suspend(pdev, PMSG_HIBERNATE);
}
}
}
return NOTIFY_DONE;
}
static int
viawget_suspend(struct pci_dev *pcid, pm_message_t state)
{
int power_status; // to silence the compiler
PSDevice pDevice = pci_get_drvdata(pcid);
PSMgmtObject pMgmt = pDevice->pMgmt;
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
pci_save_state(pcid);
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
pDevice->uCmdDequeueIdx = 0;
pDevice->uCmdEnqueueIdx = 0;
pDevice->bCmdRunning = false;
MACbShutdown(pDevice->PortOffset);
MACvSaveContext(pDevice->PortOffset, pDevice->abyMacContext);
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
pci_disable_device(pcid);
power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state));
spin_unlock_irq(&pDevice->lock);
return 0;
}
static int
viawget_resume(struct pci_dev *pcid)
{
PSDevice pDevice = pci_get_drvdata(pcid);
PSMgmtObject pMgmt = pDevice->pMgmt;
int power_status; // to silence the compiler
power_status = pci_set_power_state(pcid, 0);
power_status = pci_enable_wake(pcid, 0, 0);
pci_restore_state(pcid);
if (netif_running(pDevice->dev)) {
spin_lock_irq(&pDevice->lock);
MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
device_init_registers(pDevice, DEVICE_INIT_DXPL);
if (pMgmt->sNodeDBTable[0].bActive == true) { // Assoc with BSS
pMgmt->sNodeDBTable[0].bActive = false;
pDevice->bLinkPass = false;
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
// In ad-hoc mode, the BSS state is set back to started.
pMgmt->eCurrState = WMAC_STATE_STARTED;
} else {
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
pMgmt->eCurrState = WMAC_STATE_IDLE;
}
}
init_timer(&pMgmt->sTimerSecondCallback);
init_timer(&pDevice->sTimerCommand);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
}
return 0;
}
#endif
| gpl-2.0 |
kingklick/kk-nexus-kernel | drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c | 1529 | 14795 | /*
* Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
* Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
* Copyright (c) 2002, 2003 Tuukka Toivonen
* Copyright (c) 2008 Erik Andrén
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
* P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
* P/N 861075-0040: Sensor HDCS1000 ASIC
* P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
/*
* The spec file for the PB-0100 suggests the following for best quality
* images after the sensor has been reset :
*
* PB_ADCGAINL = R60 = 0x03 (3 dec) : sets low reference of ADC
to produce good black level
* PB_PREADCTRL = R32 = 0x1400 (5120 dec) : Enables global gain changes
through R53
* PB_ADCMINGAIN = R52 = 0x10 (16 dec) : Sets the minimum gain for
auto-exposure
* PB_ADCGLOBALGAIN = R53 = 0x10 (16 dec) : Sets the global gain
* PB_EXPGAIN = R14 = 0x11 (17 dec) : Sets the auto-exposure value
* PB_UPDATEINT = R23 = 0x02 (2 dec) : Sets the speed on
auto-exposure routine
* PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate
*/
#include "stv06xx_pb0100.h"
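/*
 * Illustrative sketch (added, not part of the original driver): the
 * spec-suggested post-reset values from the comment above, expressed as
 * sensor writes.  Register names and stv06xx_write_sensor() come from this
 * file; note that pb0100_init() below uses different values for several of
 * these registers.
 */
static inline void pb0100_example_spec_defaults(struct sd *sd)
{
stv06xx_write_sensor(sd, PB_ADCGAINL, 3); /* R60: ADC low reference */
stv06xx_write_sensor(sd, PB_PREADCTRL, 0x1400); /* R32: global gain via R53 */
stv06xx_write_sensor(sd, PB_ADCMINGAIN, 16); /* R52: auto-exposure minimum gain */
stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 16); /* R53: global gain */
stv06xx_write_sensor(sd, PB_EXPGAIN, 17); /* R14: auto-exposure value */
stv06xx_write_sensor(sd, PB_UPDATEINT, 2); /* R23: auto-exposure update speed */
stv06xx_write_sensor(sd, PB_CFILLIN, 14); /* R5: frame rate */
}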
static const struct ctrl pb0100_ctrl[] = {
#define GAIN_IDX 0
{
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
.maximum = 255,
.step = 1,
.default_value = 128
},
.set = pb0100_set_gain,
.get = pb0100_get_gain
},
#define RED_BALANCE_IDX 1
{
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
.minimum = -255,
.maximum = 255,
.step = 1,
.default_value = 0
},
.set = pb0100_set_red_balance,
.get = pb0100_get_red_balance
},
#define BLUE_BALANCE_IDX 2
{
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Balance",
.minimum = -255,
.maximum = 255,
.step = 1,
.default_value = 0
},
.set = pb0100_set_blue_balance,
.get = pb0100_get_blue_balance
},
#define EXPOSURE_IDX 3
{
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Exposure",
.minimum = 0,
.maximum = 511,
.step = 1,
.default_value = 12
},
.set = pb0100_set_exposure,
.get = pb0100_get_exposure
},
#define AUTOGAIN_IDX 4
{
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Automatic Gain and Exposure",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1
},
.set = pb0100_set_autogain,
.get = pb0100_get_autogain
},
#define AUTOGAIN_TARGET_IDX 5
{
{
.id = V4L2_CTRL_CLASS_USER + 0x1000,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Automatic Gain Target",
.minimum = 0,
.maximum = 255,
.step = 1,
.default_value = 128
},
.set = pb0100_set_autogain_target,
.get = pb0100_get_autogain_target
},
#define NATURAL_IDX 6
{
{
.id = V4L2_CTRL_CLASS_USER + 0x1001,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Natural Light Source",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1
},
.set = pb0100_set_natural,
.get = pb0100_get_natural
}
};
static struct v4l2_pix_format pb0100_mode[] = {
/* The low res / subsample modes are disabled: they only provide half the
horizontal resolution, and halving the vertical resolution does not seem to work */
{
320,
240,
V4L2_PIX_FMT_SGRBG8,
V4L2_FIELD_NONE,
.sizeimage = 320 * 240,
.bytesperline = 320,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = PB0100_CROP_TO_VGA
},
{
352,
288,
V4L2_PIX_FMT_SGRBG8,
V4L2_FIELD_NONE,
.sizeimage = 352 * 288,
.bytesperline = 352,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
}
};
static int pb0100_probe(struct sd *sd)
{
u16 sensor;
int i, err;
s32 *sensor_settings;
err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
if (err < 0)
return -ENODEV;
if ((sensor >> 8) == 0x64) {
sensor_settings = kmalloc(
ARRAY_SIZE(pb0100_ctrl) * sizeof(s32),
GFP_KERNEL);
if (!sensor_settings)
return -ENOMEM;
info("Photobit pb0100 sensor detected");
sd->gspca_dev.cam.cam_mode = pb0100_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
sd->desc.ctrls = pb0100_ctrl;
sd->desc.nctrls = ARRAY_SIZE(pb0100_ctrl);
for (i = 0; i < sd->desc.nctrls; i++)
sensor_settings[i] = pb0100_ctrl[i].qctrl.default_value;
sd->sensor_priv = sensor_settings;
return 0;
}
return -ENODEV;
}
static int pb0100_start(struct sd *sd)
{
int err;
struct cam *cam = &sd->gspca_dev.cam;
s32 *sensor_settings = sd->sensor_priv;
u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
/* Setup sensor window */
if (mode & PB0100_CROP_TO_VGA) {
stv06xx_write_sensor(sd, PB_RSTART, 30);
stv06xx_write_sensor(sd, PB_CSTART, 20);
stv06xx_write_sensor(sd, PB_RWSIZE, 240 - 1);
stv06xx_write_sensor(sd, PB_CWSIZE, 320 - 1);
} else {
stv06xx_write_sensor(sd, PB_RSTART, 8);
stv06xx_write_sensor(sd, PB_CSTART, 4);
stv06xx_write_sensor(sd, PB_RWSIZE, 288 - 1);
stv06xx_write_sensor(sd, PB_CWSIZE, 352 - 1);
}
if (mode & PB0100_SUBSAMPLE) {
stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02); /* Wrong, FIXME */
stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);
stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
} else {
stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
/* larger -> slower */
stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
}
/* set_gain also sets red and blue balance */
pb0100_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
pb0100_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]);
pb0100_set_autogain_target(&sd->gspca_dev,
sensor_settings[AUTOGAIN_TARGET_IDX]);
pb0100_set_autogain(&sd->gspca_dev, sensor_settings[AUTOGAIN_IDX]);
err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
PDEBUG(D_STREAM, "Started stream, status: %d", err);
return (err < 0) ? err : 0;
}
static int pb0100_stop(struct sd *sd)
{
int err;
err = stv06xx_write_sensor(sd, PB_ABORTFRAME, 1);
if (err < 0)
goto out;
/* Set bit 1 to zero */
err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));
PDEBUG(D_STREAM, "Halting stream");
out:
return (err < 0) ? err : 0;
}
static void pb0100_disconnect(struct sd *sd)
{
sd->sensor = NULL;
kfree(sd->sensor_priv);
}
/* FIXME: Sort the init commands out and put them into tables,
this is only for getting the camera to work */
/* FIXME: No error handling for now,
add this once the init has been converted to proper tables */
static int pb0100_init(struct sd *sd)
{
stv06xx_write_bridge(sd, STV_REG00, 1);
stv06xx_write_bridge(sd, STV_SCAN_RATE, 0);
/* Reset sensor */
stv06xx_write_sensor(sd, PB_RESET, 1);
stv06xx_write_sensor(sd, PB_RESET, 0);
/* Disable chip */
stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));
/* Gain stuff...*/
stv06xx_write_sensor(sd, PB_PREADCTRL, BIT(12)|BIT(10)|BIT(6));
stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 12);
/* Set up auto-exposure */
/* ADC VREF_HI new setting for a transition
from the Expose1 to the Expose2 setting */
stv06xx_write_sensor(sd, PB_R28, 12);
/* gain max for autoexposure */
stv06xx_write_sensor(sd, PB_ADCMAXGAIN, 180);
/* gain min for autoexposure */
stv06xx_write_sensor(sd, PB_ADCMINGAIN, 12);
/* Maximum frame integration time (programmed into R8)
allowed for auto-exposure routine */
stv06xx_write_sensor(sd, PB_R54, 3);
/* Minimum frame integration time (programmed into R8)
allowed for auto-exposure routine */
stv06xx_write_sensor(sd, PB_R55, 0);
stv06xx_write_sensor(sd, PB_UPDATEINT, 1);
/* R15 Expose0 (maximum that auto-exposure may use) */
stv06xx_write_sensor(sd, PB_R15, 800);
/* R17 Expose2 (minimum that auto-exposure may use) */
stv06xx_write_sensor(sd, PB_R17, 10);
stv06xx_write_sensor(sd, PB_EXPGAIN, 0);
/* 0x14 */
stv06xx_write_sensor(sd, PB_VOFFSET, 0);
/* 0x0D */
stv06xx_write_sensor(sd, PB_ADCGAINH, 11);
/* Set black level (important!) */
stv06xx_write_sensor(sd, PB_ADCGAINL, 0);
/* ??? */
stv06xx_write_bridge(sd, STV_REG00, 0x11);
stv06xx_write_bridge(sd, STV_REG03, 0x45);
stv06xx_write_bridge(sd, STV_REG04, 0x07);
/* ISO-Size (0x27b: 635... why? - HDCS uses 847) */
stv06xx_write_bridge(sd, STV_ISO_SIZE_L, 847);
/* Scan/timing for the sensor */
stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
stv06xx_write_sensor(sd, PB_CFILLIN, 14);
stv06xx_write_sensor(sd, PB_VBL, 0);
stv06xx_write_sensor(sd, PB_FINTTIME, 0);
stv06xx_write_sensor(sd, PB_RINTTIME, 123);
stv06xx_write_bridge(sd, STV_REG01, 0xc2);
stv06xx_write_bridge(sd, STV_REG02, 0xb0);
return 0;
}
static int pb0100_dump(struct sd *sd)
{
return 0;
}
static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[GAIN_IDX];
return 0;
}
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
if (sensor_settings[AUTOGAIN_IDX])
return -EBUSY;
sensor_settings[GAIN_IDX] = val;
err = stv06xx_write_sensor(sd, PB_G1GAIN, val);
if (!err)
err = stv06xx_write_sensor(sd, PB_G2GAIN, val);
PDEBUG(D_V4L2, "Set green gain to %d, status: %d", val, err);
if (!err)
err = pb0100_set_red_balance(gspca_dev,
sensor_settings[RED_BALANCE_IDX]);
if (!err)
err = pb0100_set_blue_balance(gspca_dev,
sensor_settings[BLUE_BALANCE_IDX]);
return err;
}
static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[RED_BALANCE_IDX];
return 0;
}
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
if (sensor_settings[AUTOGAIN_IDX])
return -EBUSY;
sensor_settings[RED_BALANCE_IDX] = val;
val += sensor_settings[GAIN_IDX];
if (val < 0)
val = 0;
else if (val > 255)
val = 255;
err = stv06xx_write_sensor(sd, PB_RGAIN, val);
PDEBUG(D_V4L2, "Set red gain to %d, status: %d", val, err);
return err;
}
static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[BLUE_BALANCE_IDX];
return 0;
}
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
if (sensor_settings[AUTOGAIN_IDX])
return -EBUSY;
sensor_settings[BLUE_BALANCE_IDX] = val;
val += sensor_settings[GAIN_IDX];
if (val < 0)
val = 0;
else if (val > 255)
val = 255;
err = stv06xx_write_sensor(sd, PB_BGAIN, val);
PDEBUG(D_V4L2, "Set blue gain to %d, status: %d", val, err);
return err;
}
static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[EXPOSURE_IDX];
return 0;
}
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
if (sensor_settings[AUTOGAIN_IDX])
return -EBUSY;
sensor_settings[EXPOSURE_IDX] = val;
err = stv06xx_write_sensor(sd, PB_RINTTIME, val);
PDEBUG(D_V4L2, "Set exposure to %d, status: %d", val, err);
return err;
}
static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[AUTOGAIN_IDX];
return 0;
}
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
sensor_settings[AUTOGAIN_IDX] = val;
if (sensor_settings[AUTOGAIN_IDX]) {
if (sensor_settings[NATURAL_IDX])
val = BIT(6)|BIT(4)|BIT(0);
else
val = BIT(4)|BIT(0);
} else
val = 0;
err = stv06xx_write_sensor(sd, PB_EXPGAIN, val);
PDEBUG(D_V4L2, "Set autogain to %d (natural: %d), status: %d",
sensor_settings[AUTOGAIN_IDX], sensor_settings[NATURAL_IDX],
err);
return err;
}
static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[AUTOGAIN_TARGET_IDX];
return 0;
}
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
{
int err, totalpixels, brightpixels, darkpixels;
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
sensor_settings[AUTOGAIN_TARGET_IDX] = val;
/* Number of pixels counted by the sensor when subsampling the pixels.
* Slightly larger than the real value to avoid oscillation */
totalpixels = gspca_dev->width * gspca_dev->height;
totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
brightpixels = (totalpixels * val) >> 8;
darkpixels = totalpixels - brightpixels;
err = stv06xx_write_sensor(sd, PB_R21, brightpixels);
if (!err)
err = stv06xx_write_sensor(sd, PB_R22, darkpixels);
PDEBUG(D_V4L2, "Set autogain target to %d, status: %d", val, err);
return err;
}
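/*
 * Worked example (added for clarity): in the 352x288 mode totalpixels is
 * 101376, so the subsampled count used above is 101376/64 + 101376/4096 =
 * 1584 + 24 = 1608.  With the default target of 128, brightpixels =
 * (1608 * 128) >> 8 = 804 and darkpixels = 804, i.e. the auto-exposure
 * loop aims for roughly half of the counted pixels above the threshold.
 */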
static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
*val = sensor_settings[NATURAL_IDX];
return 0;
}
static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
s32 *sensor_settings = sd->sensor_priv;
sensor_settings[NATURAL_IDX] = val;
return pb0100_set_autogain(gspca_dev, sensor_settings[AUTOGAIN_IDX]);
}
| gpl-2.0 |
Linux-Wii-Mod/linux-wii-2.6.32 | arch/cris/arch-v32/mm/init.c | 1529 | 5451 | /*
* Set up paging and the MMU.
*
* Copyright (C) 2000-2003, Axis Communications AB.
*
* Authors: Bjorn Wesen <bjornw@axis.com>
* Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
*/
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>
extern void tlb_init(void);
/*
* The kernel is already mapped with linear mapping at kseg_c so there's no
* need to map it with a page table. However, head.S also temporarily mapped it
* at kseg_4, so the ksegs are set up again. Also clear the TLB and do various
* other paging stuff.
*/
void __init
cris_mmu_init(void)
{
unsigned long mmu_config;
unsigned long mmu_kbase_hi;
unsigned long mmu_kbase_lo;
unsigned short mmu_page_id;
/*
* Make sure the current pgd table points to something sane, even if it
* is most probably not used until the next switch_mm.
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
#ifdef CONFIG_SMP
{
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
}
#endif
/* Initialise the TLB. Function found in tlb.c. */
tlb_init();
/* Enable exceptions and initialize the kernel segments. */
mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
REG_STATE(mmu, rw_mm_cfg, acc, on) |
REG_STATE(mmu, rw_mm_cfg, ex, on) |
REG_STATE(mmu, rw_mm_cfg, inv, on) |
REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
#else
REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
REG_STATE(mmu, rw_mm_cfg, seg_0, page));
mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));
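/*
 * Added note: each rw_mm_kbase field holds the physical base, in 256 MB
 * units, that the corresponding linearly mapped kseg is translated to
 * (e.g. kseg_c -> 0x40000000 here); segments configured as "page" above go
 * through the page tables and their base field is left at zero.
 */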
mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);
/* Update the instruction MMU. */
SUPP_BANK_SEL(BANK_IM);
SUPP_REG_WR(RW_MM_CFG, mmu_config);
SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
/* Update the data MMU. */
SUPP_BANK_SEL(BANK_DM);
SUPP_REG_WR(RW_MM_CFG, mmu_config);
SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
SPEC_REG_WR(SPEC_REG_PID, 0);
/*
* The MMU has been enabled ever since head.S but just to make it
* totally obvious enable it here as well.
*/
SUPP_BANK_SEL(BANK_GC);
SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
void __init
paging_init(void)
{
int i;
unsigned long zones_size[MAX_NR_ZONES];
printk("Setting up paging and the MMU.\n");
/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
for(i = 0; i < PTRS_PER_PGD; i++)
swapper_pg_dir[i] = __pgd(0);
cris_mmu_init();
/*
* Initialize the bad page table and bad page to point to a couple of
* allocated pages.
*/
empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE);
/* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
/*
* Use free_area_init_node instead of free_area_init, because it is
* designed for systems where the DRAM starts at an address
* substantially higher than 0, like us (we start at PAGE_OFFSET). This
* saves space in the mem_map page array.
*/
free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
mem_map = contig_page_data.node_mem_map;
}
| gpl-2.0 |
monishk10/kernel_kenzo | kernel/utsname.c | 2041 | 2925 | /*
* Copyright (C) 2004 IBM Corporation
*
* Author: Serge Hallyn <serue@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/export.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
static struct uts_namespace *create_uts_ns(void)
{
struct uts_namespace *uts_ns;
uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
if (uts_ns)
kref_init(&uts_ns->kref);
return uts_ns;
}
/*
* Clone a new ns copying an original utsname, setting refcount to 1
* @old_ns: namespace to clone
* Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
*/
static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
struct uts_namespace *old_ns)
{
struct uts_namespace *ns;
int err;
ns = create_uts_ns();
if (!ns)
return ERR_PTR(-ENOMEM);
err = proc_alloc_inum(&ns->proc_inum);
if (err) {
kfree(ns);
return ERR_PTR(err);
}
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
ns->user_ns = get_user_ns(user_ns);
up_read(&uts_sem);
return ns;
}
/*
* Copy task tsk's utsname namespace, or clone it if flags
* specify CLONE_NEWUTS. In the latter case, changes to the
* utsname of this process won't be seen by the parent, and vice
* versa.
*/
struct uts_namespace *copy_utsname(unsigned long flags,
struct user_namespace *user_ns, struct uts_namespace *old_ns)
{
struct uts_namespace *new_ns;
BUG_ON(!old_ns);
get_uts_ns(old_ns);
if (!(flags & CLONE_NEWUTS))
return old_ns;
new_ns = clone_uts_ns(user_ns, old_ns);
put_uts_ns(old_ns);
return new_ns;
}
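/*
 * Illustrative userspace view (added for exposition): after
 * unshare(CLONE_NEWUTS) a task gets its own copy of the utsname data, so
 *
 *	if (unshare(CLONE_NEWUTS) == 0)
 *		sethostname("sandbox", 7);	// parent keeps its hostname
 *
 * only changes the hostname seen inside the new namespace, matching the
 * copy-vs-share behaviour described above.
 */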
void free_uts_ns(struct kref *kref)
{
struct uts_namespace *ns;
ns = container_of(kref, struct uts_namespace, kref);
put_user_ns(ns->user_ns);
proc_free_inum(ns->proc_inum);
kfree(ns);
}
static void *utsns_get(struct task_struct *task)
{
struct uts_namespace *ns = NULL;
struct nsproxy *nsproxy;
rcu_read_lock();
nsproxy = task_nsproxy(task);
if (nsproxy) {
ns = nsproxy->uts_ns;
get_uts_ns(ns);
}
rcu_read_unlock();
return ns;
}
static void utsns_put(void *ns)
{
put_uts_ns(ns);
}
static int utsns_install(struct nsproxy *nsproxy, void *new)
{
struct uts_namespace *ns = new;
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
!nsown_capable(CAP_SYS_ADMIN))
return -EPERM;
get_uts_ns(ns);
put_uts_ns(nsproxy->uts_ns);
nsproxy->uts_ns = ns;
return 0;
}
static unsigned int utsns_inum(void *vp)
{
struct uts_namespace *ns = vp;
return ns->proc_inum;
}
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
.inum = utsns_inum,
};
| gpl-2.0 |
ircncl/linux-grsec-incremental | drivers/net/ethernet/atheros/alx/ethtool.c | 2041 | 8393 | /*
* Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include "alx.h"
#include "reg.h"
#include "hw.h"
/* The order of these strings must match the order of the fields in
* struct alx_hw_stats
* See hw.h
*/
static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_packets",
"rx_bcast_packets",
"rx_mcast_packets",
"rx_pause_packets",
"rx_ctrl_packets",
"rx_fcs_errors",
"rx_length_errors",
"rx_bytes",
"rx_runt_packets",
"rx_fragments",
"rx_64B_or_less_packets",
"rx_65B_to_127B_packets",
"rx_128B_to_255B_packets",
"rx_256B_to_511B_packets",
"rx_512B_to_1023B_packets",
"rx_1024B_to_1518B_packets",
"rx_1519B_to_mtu_packets",
"rx_oversize_packets",
"rx_rxf_ov_drop_packets",
"rx_rrd_ov_drop_packets",
"rx_align_errors",
"rx_bcast_bytes",
"rx_mcast_bytes",
"rx_address_errors",
"tx_packets",
"tx_bcast_packets",
"tx_mcast_packets",
"tx_pause_packets",
"tx_exc_defer_packets",
"tx_ctrl_packets",
"tx_defer_packets",
"tx_bytes",
"tx_64B_or_less_packets",
"tx_65B_to_127B_packets",
"tx_128B_to_255B_packets",
"tx_256B_to_511B_packets",
"tx_512B_to_1023B_packets",
"tx_1024B_to_1518B_packets",
"tx_1519B_to_mtu_packets",
"tx_single_collision",
"tx_multiple_collisions",
"tx_late_collision",
"tx_abort_collision",
"tx_underrun",
"tx_trd_eop",
"tx_length_errors",
"tx_trunc_packets",
"tx_bcast_bytes",
"tx_mcast_bytes",
"tx_update",
};
#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats)
static u32 alx_get_supported_speeds(struct alx_hw *hw)
{
u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full;
if (alx_hw_giga(hw))
supported |= SUPPORTED_1000baseT_Full;
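/* Added note: the BUILD_BUG_ONs below pin the SUPPORTED_* and ADVERTISED_*
* bit definitions to the same values, so the mask built here can be used
* both for ecmd->supported and to validate ecmd->advertising. */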
BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half);
BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full);
BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half);
BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full);
BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full);
return supported;
}
static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_TP |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
if (alx_hw_giga(hw))
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->supported |= alx_get_supported_speeds(hw);
ecmd->advertising = ADVERTISED_TP;
if (hw->adv_cfg & ADVERTISED_Autoneg)
ecmd->advertising |= hw->adv_cfg;
ecmd->port = PORT_TP;
ecmd->phy_address = 0;
if (hw->adv_cfg & ADVERTISED_Autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_INTERNAL;
if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
if (hw->flowctrl & ALX_FC_RX) {
ecmd->advertising |= ADVERTISED_Pause;
if (!(hw->flowctrl & ALX_FC_TX))
ecmd->advertising |= ADVERTISED_Asym_Pause;
} else if (hw->flowctrl & ALX_FC_TX) {
ecmd->advertising |= ADVERTISED_Asym_Pause;
}
}
ethtool_cmd_speed_set(ecmd, hw->link_speed);
ecmd->duplex = hw->duplex;
return 0;
}
static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
u32 adv_cfg;
ASSERT_RTNL();
if (ecmd->autoneg == AUTONEG_ENABLE) {
if (ecmd->advertising & ~alx_get_supported_speeds(hw))
return -EINVAL;
adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
} else {
adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd),
ecmd->duplex);
if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full)
return -EINVAL;
}
hw->adv_cfg = adv_cfg;
return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
}
static void alx_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG &&
hw->adv_cfg & ADVERTISED_Autoneg);
pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX);
pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX);
}
static int alx_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
int err = 0;
bool reconfig_phy = false;
u8 fc = 0;
if (pause->tx_pause)
fc |= ALX_FC_TX;
if (pause->rx_pause)
fc |= ALX_FC_RX;
if (pause->autoneg)
fc |= ALX_FC_ANEG;
ASSERT_RTNL();
/* restart auto-neg for auto-mode */
if (hw->adv_cfg & ADVERTISED_Autoneg) {
if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
reconfig_phy = true;
if (fc & hw->flowctrl & ALX_FC_ANEG &&
(fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
reconfig_phy = true;
}
if (reconfig_phy) {
err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
if (err)
return err;
}
/* flow control on mac */
if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
alx_cfg_mac_flowcontrol(hw, fc);
hw->flowctrl = fc;
return 0;
}
static u32 alx_get_msglevel(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
return alx->msg_enable;
}
static void alx_set_msglevel(struct net_device *netdev, u32 data)
{
struct alx_priv *alx = netdev_priv(netdev);
alx->msg_enable = data;
}
static void alx_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *estats, u64 *data)
{
struct alx_priv *alx = netdev_priv(netdev);
struct alx_hw *hw = &alx->hw;
spin_lock(&alx->stats_lock);
alx_update_hw_stats(hw);
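/* Added note: the BUILD_BUG_ON and memcpy below rely on alx_gstrings_stats
* being in the same order as the counters in struct alx_hw_stats starting
* at rx_ok, as stated above the string table. */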
BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
ALX_NUM_STATS * sizeof(u64));
memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
spin_unlock(&alx->stats_lock);
}
static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats));
break;
default:
WARN_ON(1);
break;
}
}
static int alx_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ALX_NUM_STATS;
default:
return -EINVAL;
}
}
const struct ethtool_ops alx_ethtool_ops = {
.get_settings = alx_get_settings,
.set_settings = alx_set_settings,
.get_pauseparam = alx_get_pauseparam,
.set_pauseparam = alx_set_pauseparam,
.get_msglevel = alx_get_msglevel,
.set_msglevel = alx_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = alx_get_strings,
.get_sset_count = alx_get_sset_count,
.get_ethtool_stats = alx_get_ethtool_stats,
};
| gpl-2.0 |
monishk10/kernel_cancro | drivers/mfd/wcd9xxx-core-resource.c | 2297 | 6032 | /* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mfd/wcd9xxx/core-resource.h>
static enum wcd9xxx_intf_status wcd9xxx_intf = -1;
int wcd9xxx_core_irq_init(
struct wcd9xxx_core_resource *wcd9xxx_core_res)
{
int ret = 0;
if (wcd9xxx_core_res->irq != 1) {
ret = wcd9xxx_irq_init(wcd9xxx_core_res);
if (ret)
pr_err("IRQ initialization failed\n");
}
return ret;
}
EXPORT_SYMBOL(wcd9xxx_core_irq_init);
int wcd9xxx_initialize_irq(
struct wcd9xxx_core_resource *wcd9xxx_core_res,
unsigned int irq,
unsigned int irq_base)
{
wcd9xxx_core_res->irq = irq;
wcd9xxx_core_res->irq_base = irq_base;
return 0;
}
EXPORT_SYMBOL(wcd9xxx_initialize_irq);
int wcd9xxx_core_res_init(
struct wcd9xxx_core_resource *wcd9xxx_core_res,
int num_irqs, int num_irq_regs,
int (*codec_read)(struct wcd9xxx_core_resource*, unsigned short),
int (*codec_write)(struct wcd9xxx_core_resource*, unsigned short, u8),
int (*codec_bulk_read) (struct wcd9xxx_core_resource*, unsigned short,
int, u8*),
int (*codec_bulk_write) (struct wcd9xxx_core_resource*, unsigned short,
int, u8*))
{
mutex_init(&wcd9xxx_core_res->pm_lock);
wcd9xxx_core_res->wlock_holders = 0;
wcd9xxx_core_res->pm_state = WCD9XXX_PM_SLEEPABLE;
init_waitqueue_head(&wcd9xxx_core_res->pm_wq);
pm_qos_add_request(&wcd9xxx_core_res->pm_qos_req,
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
wcd9xxx_core_res->codec_reg_read = codec_read;
wcd9xxx_core_res->codec_reg_write = codec_write;
wcd9xxx_core_res->codec_bulk_read = codec_bulk_read;
wcd9xxx_core_res->codec_bulk_write = codec_bulk_write;
wcd9xxx_core_res->num_irqs = num_irqs;
wcd9xxx_core_res->num_irq_regs = num_irq_regs;
pr_info("%s: num_irqs = %d, num_irq_regs = %d\n",
__func__, wcd9xxx_core_res->num_irqs,
wcd9xxx_core_res->num_irq_regs);
return 0;
}
EXPORT_SYMBOL(wcd9xxx_core_res_init);
void wcd9xxx_core_res_deinit(struct wcd9xxx_core_resource *wcd9xxx_core_res)
{
pm_qos_remove_request(&wcd9xxx_core_res->pm_qos_req);
mutex_destroy(&wcd9xxx_core_res->pm_lock);
wcd9xxx_core_res->codec_reg_read = NULL;
wcd9xxx_core_res->codec_reg_write = NULL;
wcd9xxx_core_res->codec_bulk_read = NULL;
}
EXPORT_SYMBOL(wcd9xxx_core_res_deinit);
enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(
struct wcd9xxx_core_resource *wcd9xxx_core_res,
enum wcd9xxx_pm_state o,
enum wcd9xxx_pm_state n)
{
enum wcd9xxx_pm_state old;
mutex_lock(&wcd9xxx_core_res->pm_lock);
old = wcd9xxx_core_res->pm_state;
if (old == o)
wcd9xxx_core_res->pm_state = n;
mutex_unlock(&wcd9xxx_core_res->pm_lock);
return old;
}
EXPORT_SYMBOL(wcd9xxx_pm_cmpxchg);
int wcd9xxx_core_res_suspend(
struct wcd9xxx_core_resource *wcd9xxx_core_res,
pm_message_t pmesg)
{
int ret = 0;
pr_debug("%s: enter\n", __func__);
/*
* pm_qos_update_request() can be called after this suspend chain call
* has started, so suspend can be called while the lock is being held
*/
mutex_lock(&wcd9xxx_core_res->pm_lock);
if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_SLEEPABLE) {
pr_debug("%s: suspending system, state %d, wlock %d\n",
__func__, wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
wcd9xxx_core_res->pm_state = WCD9XXX_PM_ASLEEP;
} else if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_AWAKE) {
/*
* unlock to wait for pm_state == WCD9XXX_PM_SLEEPABLE
* then set to WCD9XXX_PM_ASLEEP
*/
pr_debug("%s: waiting to suspend system, state %d, wlock %d\n",
__func__, wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
mutex_unlock(&wcd9xxx_core_res->pm_lock);
if (!(wait_event_timeout(wcd9xxx_core_res->pm_wq,
wcd9xxx_pm_cmpxchg(wcd9xxx_core_res,
WCD9XXX_PM_SLEEPABLE,
WCD9XXX_PM_ASLEEP) ==
WCD9XXX_PM_SLEEPABLE,
HZ))) {
pr_debug("%s: suspend failed state %d, wlock %d\n",
__func__, wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
ret = -EBUSY;
} else {
pr_debug("%s: done, state %d, wlock %d\n", __func__,
wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
}
mutex_lock(&wcd9xxx_core_res->pm_lock);
} else if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_ASLEEP) {
pr_warn("%s: system is already suspended, state %d, wlock %d\n",
__func__, wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
}
mutex_unlock(&wcd9xxx_core_res->pm_lock);
return ret;
}
EXPORT_SYMBOL(wcd9xxx_core_res_suspend);
int wcd9xxx_core_res_resume(
struct wcd9xxx_core_resource *wcd9xxx_core_res)
{
int ret = 0;
pr_debug("%s: enter\n", __func__);
mutex_lock(&wcd9xxx_core_res->pm_lock);
if (wcd9xxx_core_res->pm_state == WCD9XXX_PM_ASLEEP) {
pr_debug("%s: resuming system, state %d, wlock %d\n", __func__,
wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
wcd9xxx_core_res->pm_state = WCD9XXX_PM_SLEEPABLE;
} else {
pr_warn("%s: system is already awake, state %d wlock %d\n",
__func__, wcd9xxx_core_res->pm_state,
wcd9xxx_core_res->wlock_holders);
}
mutex_unlock(&wcd9xxx_core_res->pm_lock);
wake_up_all(&wcd9xxx_core_res->pm_wq);
return ret;
}
EXPORT_SYMBOL(wcd9xxx_core_res_resume);
enum wcd9xxx_intf_status wcd9xxx_get_intf_type(void)
{
return wcd9xxx_intf;
}
EXPORT_SYMBOL(wcd9xxx_get_intf_type);
void wcd9xxx_set_intf_type(enum wcd9xxx_intf_status intf_status)
{
wcd9xxx_intf = intf_status;
}
EXPORT_SYMBOL(wcd9xxx_set_intf_type);
| gpl-2.0 |
is00hcw/fastsocket | kernel/arch/arm/mach-at91/leds.c | 4601 | 3690 | /*
* LED driver for Atmel AT91-based boards.
*
* Copyright (C) SAN People (Pty) Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/board.h>
#include <mach/gpio.h>
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_NEW_LEDS)
/*
* New cross-platform LED support.
*/
static struct gpio_led_platform_data led_data;
static struct platform_device at91_gpio_leds_device = {
.name = "leds-gpio",
.id = -1,
.dev.platform_data = &led_data,
};
void __init at91_gpio_leds(struct gpio_led *leds, int nr)
{
int i;
if (!nr)
return;
for (i = 0; i < nr; i++)
at91_set_gpio_output(leds[i].gpio, leds[i].active_low);
led_data.leds = leds;
led_data.num_leds = nr;
platform_device_register(&at91_gpio_leds_device);
}
#else
void __init at91_gpio_leds(struct gpio_led *leds, int nr) {}
#endif
/* ------------------------------------------------------------------------- */
#if defined (CONFIG_LEDS_ATMEL_PWM)
/*
* PWM Leds
*/
static struct gpio_led_platform_data pwm_led_data;
static struct platform_device at91_pwm_leds_device = {
.name = "leds-atmel-pwm",
.id = -1,
.dev.platform_data = &pwm_led_data,
};
void __init at91_pwm_leds(struct gpio_led *leds, int nr)
{
int i;
u32 pwm_mask = 0;
if (!nr)
return;
for (i = 0; i < nr; i++)
pwm_mask |= (1 << leds[i].gpio);
pwm_led_data.leds = leds;
pwm_led_data.num_leds = nr;
at91_add_device_pwm(pwm_mask);
platform_device_register(&at91_pwm_leds_device);
}
#else
void __init at91_pwm_leds(struct gpio_led *leds, int nr){}
#endif
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_LEDS)
#include <asm/leds.h>
/*
* Old ARM-specific LED framework; not fully functional when generic time is
* in use.
*/
static u8 at91_leds_cpu;
static u8 at91_leds_timer;
static inline void at91_led_on(unsigned int led)
{
at91_set_gpio_value(led, 0);
}
static inline void at91_led_off(unsigned int led)
{
at91_set_gpio_value(led, 1);
}
static inline void at91_led_toggle(unsigned int led)
{
unsigned long is_off = at91_get_gpio_value(led);
if (is_off)
at91_led_on(led);
else
at91_led_off(led);
}
/*
* Handle LED events.
*/
static void at91_leds_event(led_event_t evt)
{
unsigned long flags;
local_irq_save(flags);
switch(evt) {
case led_start: /* System startup */
at91_led_on(at91_leds_cpu);
break;
case led_stop: /* System stop / suspend */
at91_led_off(at91_leds_cpu);
break;
#ifdef CONFIG_LEDS_TIMER
case led_timer: /* Every 50 timer ticks */
at91_led_toggle(at91_leds_timer);
break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start: /* Entering idle state */
at91_led_off(at91_leds_cpu);
break;
case led_idle_end: /* Exit idle state */
at91_led_on(at91_leds_cpu);
break;
#endif
default:
break;
}
local_irq_restore(flags);
}
static int __init leds_init(void)
{
if (!at91_leds_timer || !at91_leds_cpu)
return -ENODEV;
leds_event = at91_leds_event;
leds_event(led_start);
return 0;
}
__initcall(leds_init);
void __init at91_init_leds(u8 cpu_led, u8 timer_led)
{
/* Enable GPIO to access the LEDs */
at91_set_gpio_output(cpu_led, 1);
at91_set_gpio_output(timer_led, 1);
at91_leds_cpu = cpu_led;
at91_leds_timer = timer_led;
}
#else
void __init at91_init_leds(u8 cpu_led, u8 timer_led) {}
#endif
| gpl-2.0 |
jamison904/kernel_jflte_tw | drivers/regulator/mc13xxx-regulator-core.c | 4857 | 8900 | /*
* Regulator Driver for Freescale MC13xxx PMIC
*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
*
* Based on mc13783 regulator driver :
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
* Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Regs infos taken from mc13xxx drivers from freescale and mc13xxx.pdf file
* from freescale
*/
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
int ret;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
mc13xxx_regulators[id].enable_bit,
mc13xxx_regulators[id].enable_bit);
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
int ret;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
mc13xxx_regulators[id].enable_bit, 0);
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int ret, id = rdev_get_id(rdev);
unsigned int val;
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
mc13xxx_unlock(priv->mc13xxx);
if (ret)
return ret;
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
unsigned selector)
{
int id = rdev_get_id(rdev);
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
if (selector >= mc13xxx_regulators[id].desc.n_voltages)
return -EINVAL;
return mc13xxx_regulators[id].voltages[selector];
}
EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int reg_id = rdev_get_id(rdev);
int i;
int bestmatch;
int bestindex;
/*
* Locate the minimum voltage fitting the criteria on
* this regulator. The switchable voltages are not
* in strict falling order so we need to check them
* all for the best match.
*/
bestmatch = INT_MAX;
bestindex = -1;
for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
bestmatch = mc13xxx_regulators[reg_id].voltages[i];
bestindex = i;
}
}
if (bestindex < 0 || bestmatch > max_uV) {
dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
min_uV, max_uV);
return -EINVAL;
}
return bestindex;
}
EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
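/*
* Worked example (the voltage table below is hypothetical): with
* voltages[] = { 1500000, 1800000, 1100000, 2775000 } uV and a request
* of min_uV = 1200000, max_uV = 1800000, the scan keeps the smallest
* entry >= min_uV, i.e. 1500000 uV at index 0, and returns 0. Note that
* max_uV is only checked after the scan: if every entry >= min_uV also
* exceeds max_uV, bestmatch > max_uV and -EINVAL is returned.
*/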
static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int value, id = rdev_get_id(rdev);
int ret;
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
/* Find the best index */
value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
if (value < 0)
return value;
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
mc13xxx_regulators[id].vsel_mask,
value << mc13xxx_regulators[id].vsel_shift);
mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int ret, id = rdev_get_id(rdev);
unsigned int val;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx,
mc13xxx_regulators[id].vsel_reg, &val);
mc13xxx_unlock(priv->mc13xxx);
if (ret)
return ret;
val = (val & mc13xxx_regulators[id].vsel_mask)
>> mc13xxx_regulators[id].vsel_shift;
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
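/*
* Example of the field extraction above (hypothetical descriptor
* values): with vsel_mask = 0x1c, vsel_shift = 2 and a raw register
* value of 0x0b, (0x0b & 0x1c) >> 2 = 2, so voltages[2] is returned.
*/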
struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
.list_voltage = mc13xxx_regulator_list_voltage,
.set_voltage = mc13xxx_regulator_set_voltage,
.get_voltage = mc13xxx_regulator_get_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
max_uV <= mc13xxx_regulators[id].voltages[0])
return 0;
else
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
return mc13xxx_regulators[id].voltages[0];
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
.list_voltage = mc13xxx_regulator_list_voltage,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
{
return 1;
}
EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
#ifdef CONFIG_OF
int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
struct device_node *parent, *child;
int num = 0;
of_node_get(pdev->dev.parent->of_node);
parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return -ENODEV;
for_each_child_of_node(parent, child)
num++;
return num;
}
EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
int num_regulators)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13xxx_regulator_init_data *data, *p;
struct device_node *parent, *child;
int i;
of_node_get(pdev->dev.parent->of_node);
parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
return NULL;
data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
GFP_KERNEL);
if (!data)
return NULL;
p = data;
for_each_child_of_node(parent, child) {
for (i = 0; i < num_regulators; i++) {
if (!of_node_cmp(child->name,
regulators[i].desc.name)) {
p->id = i;
p->init_data = of_get_regulator_init_data(
&pdev->dev, child);
p->node = child;
p++;
break;
}
}
}
return data;
}
EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
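/*
* Sketch of the device-tree layout this parser walks (node and property
* names below are illustrative; each child must match a
* regulators[i].desc.name for it to be picked up):
*
*	regulators {
*		sw1 {
*			regulator-min-microvolt = <600000>;
*			regulator-max-microvolt = <1375000>;
*		};
*		vaudio {
*			regulator-always-on;
*		};
*	};
*
* Each matching child yields one mc13xxx_regulator_init_data entry whose
* id is the index into regulators[] and whose init_data comes from
* of_get_regulator_init_data().
*/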
#endif
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
MODULE_ALIAS("mc13xxx-regulator-core");
| gpl-2.0 |
androidbftab1/bf-kernel-3.4 | drivers/hwmon/lm92.c | 4857 | 13365 | /*
* lm92 - Hardware monitoring driver
* Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
*
* Based on the lm90 driver, with some ideas taken from the lm_sensors
* lm92 driver as well.
*
* The LM92 is a sensor chip made by National Semiconductor. It reports
* its own temperature with a 0.0625 deg resolution and a 0.33 deg
* accuracy. Complete datasheet can be obtained from National's website
* at:
* http://www.national.com/pf/LM/LM92.html
*
* This driver also supports the MAX6635 sensor chip made by Maxim.
* This chip is compatible with the LM92, but has a lesser accuracy
* (1.0 deg). Complete datasheet can be obtained from Maxim's website
* at:
* http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
*
* Since the LM92 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
*
* Support could easily be added for the National Semiconductor LM76
* and Maxim MAX6633 and MAX6634 chips, which are mostly compatible
* with the LM92.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/*
* The LM92 and MAX6635 have 2 two-state pins for address selection,
* resulting in 4 possible addresses.
*/
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
#define LM92_REG_TEMP_HYST 0x02 /* 16-bit, RW */
#define LM92_REG_TEMP_CRIT 0x03 /* 16-bit, RW */
#define LM92_REG_TEMP_LOW 0x04 /* 16-bit, RW */
#define LM92_REG_TEMP_HIGH 0x05 /* 16-bit, RW */
#define LM92_REG_MAN_ID 0x07 /* 16-bit, RO, LM92 only */
/*
* The LM92 uses signed 13-bit values with LSB = 0.0625 degree Celsius,
* left-justified in 16-bit registers. No rounding is done; with such
* a resolution it's just not worth it. Note that the MAX6635 doesn't
* make use of the 4 lower bits for limits (i.e. effective resolution
* for limits is 1 degree Celsius).
*/
static inline int TEMP_FROM_REG(s16 reg)
{
return reg / 8 * 625 / 10;
}
static inline s16 TEMP_TO_REG(int val)
{
if (val <= -60000)
return -60000 * 10 / 625 * 8;
if (val >= 160000)
return 160000 * 10 / 625 * 8;
return val * 10 / 625 * 8;
}
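/*
* Worked example of the scaling above: +25.000 degrees C is 400 counts
* of 0.0625, left-justified by 3 bits, i.e. reg = 400 * 8 = 3200
* (0x0C80). TEMP_FROM_REG(3200) = 3200 / 8 * 625 / 10 = 25000
* millidegrees, and TEMP_TO_REG(25000) = 25000 * 10 / 625 * 8 = 3200
* again.
*/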
/* Alarm flags are stored in the 3 LSB of the temperature register */
static inline u8 ALARMS_FROM_REG(s16 reg)
{
return reg & 0x0007;
}
/* Driver data (common to all clients) */
static struct i2c_driver lm92_driver;
/* Client data (each client gets its own) */
struct lm92_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
s16 temp1_input, temp1_crit, temp1_min, temp1_max, temp1_hyst;
};
/*
* Sysfs attributes and callback functions
*/
static struct lm92_data *lm92_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ)
|| !data->valid) {
dev_dbg(&client->dev, "Updating lm92 data\n");
data->temp1_input = i2c_smbus_read_word_swapped(client,
LM92_REG_TEMP);
data->temp1_hyst = i2c_smbus_read_word_swapped(client,
LM92_REG_TEMP_HYST);
data->temp1_crit = i2c_smbus_read_word_swapped(client,
LM92_REG_TEMP_CRIT);
data->temp1_min = i2c_smbus_read_word_swapped(client,
LM92_REG_TEMP_LOW);
data->temp1_max = i2c_smbus_read_word_swapped(client,
LM92_REG_TEMP_HIGH);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
#define show_temp(value) \
static ssize_t show_##value(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct lm92_data *data = lm92_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \
}
show_temp(temp1_input);
show_temp(temp1_crit);
show_temp(temp1_min);
show_temp(temp1_max);
#define set_temp(value, reg) \
static ssize_t set_##value(struct device *dev, struct device_attribute *attr, \
const char *buf, \
size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm92_data *data = i2c_get_clientdata(client); \
long val; \
int err = kstrtol(buf, 10, &val); \
if (err) \
return err; \
\
mutex_lock(&data->update_lock); \
data->value = TEMP_TO_REG(val); \
i2c_smbus_write_word_swapped(client, reg, data->value); \
mutex_unlock(&data->update_lock); \
return count; \
}
set_temp(temp1_crit, LM92_REG_TEMP_CRIT);
set_temp(temp1_min, LM92_REG_TEMP_LOW);
set_temp(temp1_max, LM92_REG_TEMP_HIGH);
static ssize_t show_temp1_crit_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_crit)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_max_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_max)
- TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t show_temp1_min_hyst(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_min)
+ TEMP_FROM_REG(data->temp1_hyst));
}
static ssize_t set_temp1_crit_hyst(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm92_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp1_hyst = TEMP_FROM_REG(data->temp1_crit) - val;
i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST,
TEMP_TO_REG(data->temp1_hyst));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp1_input));
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct lm92_data *data = lm92_update_device(dev);
return sprintf(buf, "%d\n", (data->temp1_input >> bitnr) & 1);
}
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit,
set_temp1_crit);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp1_crit_hyst,
set_temp1_crit_hyst);
static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp1_min,
set_temp1_min);
static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp1_min_hyst, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp1_max,
set_temp1_max);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
/*
* Detection and registration
*/
static void lm92_init_client(struct i2c_client *client)
{
u8 config;
/* Start the conversions if needed */
config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
if (config & 0x01)
i2c_smbus_write_byte_data(client, LM92_REG_CONFIG,
config & 0xFE);
}
/*
* The MAX6635 has no identification register, so we have to use tricks
* to identify it reliably. This is somewhat slow.
* Note that we do NOT rely on the 2 MSB of the configuration register
* always reading 0, as suggested by the datasheet, because it was once
* reported not to be true.
*/
static int max6635_check(struct i2c_client *client)
{
u16 temp_low, temp_high, temp_hyst, temp_crit;
u8 conf;
int i;
/*
* No manufacturer ID register, so a read from this address will
* always return the last read value.
*/
temp_low = i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_low)
return 0;
temp_high = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH);
if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_high)
return 0;
/* Limits are stored as integer values (signed, 9-bit). */
if ((temp_low & 0x7f00) || (temp_high & 0x7f00))
return 0;
temp_hyst = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST);
temp_crit = i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT);
if ((temp_hyst & 0x7f00) || (temp_crit & 0x7f00))
return 0;
/*
* Registers addresses were found to cycle over 16-byte boundaries.
* We don't test all registers with all offsets so as to save some
* reads and time, but this should still be sufficient to dismiss
* non-MAX6635 chips.
*/
conf = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG);
for (i = 16; i < 96; i *= 2) {
if (temp_hyst != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HYST + i - 16)
|| temp_crit != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_CRIT + i)
|| temp_low != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_LOW + i + 16)
|| temp_high != i2c_smbus_read_word_data(client,
LM92_REG_TEMP_HIGH + i + 32)
|| conf != i2c_smbus_read_byte_data(client,
LM92_REG_CONFIG + i))
return 0;
}
return 1;
}
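/*
* To make the probe above concrete: with i stepping through 16, 32 and
* 64, the hyst/crit/low/high/config values are re-read at offsets
* 0x02/0x12/0x32, 0x13/0x23/0x43, 0x24/0x34/0x54, 0x35/0x45/0x65 and
* 0x11/0x21/0x41 respectively. Each group is congruent (mod 16) to the
* corresponding base register, so on a chip whose register map really
* cycles every 16 bytes they must all mirror the values read earlier;
* any mismatch rules out a MAX6635.
*/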
static struct attribute *lm92_attributes[] = {
&dev_attr_temp1_input.attr,
&dev_attr_temp1_crit.attr,
&dev_attr_temp1_crit_hyst.attr,
&dev_attr_temp1_min.attr,
&dev_attr_temp1_min_hyst.attr,
&dev_attr_temp1_max.attr,
&dev_attr_temp1_max_hyst.attr,
&dev_attr_alarms.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm92_group = {
.attrs = lm92_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
u8 config;
u16 man_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG);
man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID);
if ((config & 0xe0) == 0x00 && man_id == 0x0180)
pr_info("lm92: Found National Semiconductor LM92 chip\n");
else if (max6635_check(new_client))
pr_info("lm92: Found Maxim MAX6635 chip\n");
else
return -ENODEV;
strlcpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
static int lm92_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct lm92_data *data;
int err;
data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the chipset */
lm92_init_client(new_client);
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &lm92_group);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
exit_free:
kfree(data);
exit:
return err;
}
static int lm92_remove(struct i2c_client *client)
{
struct lm92_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm92_group);
kfree(data);
return 0;
}
/*
* Module and driver stuff
*/
static const struct i2c_device_id lm92_id[] = {
{ "lm92", 0 },
/* max6635 could be added here */
{ }
};
MODULE_DEVICE_TABLE(i2c, lm92_id);
static struct i2c_driver lm92_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm92",
},
.probe = lm92_probe,
.remove = lm92_remove,
.id_table = lm92_id,
.detect = lm92_detect,
.address_list = normal_i2c,
};
module_i2c_driver(lm92_driver);
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM92/MAX6635 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thanhphat11/android_kernel_pantech_ef63l | drivers/hwmon/w83792d.c | 4857 | 57730 | /*
* w83792d.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2004, 2005 Winbond Electronics Corp.
* Chunhao Huang <DZShen@Winbond.com.tw>,
* Rudolf Marek <r.marek@assembler.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Note:
* 1. This driver is only for the 2.6 kernel; the 2.4 kernel needs a
* different driver.
* 2. This driver is only for the Winbond W83792D C version device; there
* are also some motherboards with the B version of the W83792D. The
* calculation method for in6-in7 (measured value, limits) is slightly
* different between the C and B versions. The C or B version can be
* identified by CR[0x49h].
*/
/*
* Supports following chips:
*
* Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83792d 9 7 7 3 0x7a 0x5ca3 yes no
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force chip initialization");
/* The W83792D registers */
static const u8 W83792D_REG_IN[9] = {
0x20, /* Vcore A in DataSheet */
0x21, /* Vcore B in DataSheet */
0x22, /* VIN0 in DataSheet */
0x23, /* VIN1 in DataSheet */
0x24, /* VIN2 in DataSheet */
0x25, /* VIN3 in DataSheet */
0x26, /* 5VCC in DataSheet */
0xB0, /* 5VSB in DataSheet */
0xB1 /* VBAT in DataSheet */
};
#define W83792D_REG_LOW_BITS1 0x3E /* Low Bits I in DataSheet */
#define W83792D_REG_LOW_BITS2 0x3F /* Low Bits II in DataSheet */
static const u8 W83792D_REG_IN_MAX[9] = {
0x2B, /* Vcore A High Limit in DataSheet */
0x2D, /* Vcore B High Limit in DataSheet */
0x2F, /* VIN0 High Limit in DataSheet */
0x31, /* VIN1 High Limit in DataSheet */
0x33, /* VIN2 High Limit in DataSheet */
0x35, /* VIN3 High Limit in DataSheet */
0x37, /* 5VCC High Limit in DataSheet */
0xB4, /* 5VSB High Limit in DataSheet */
0xB6 /* VBAT High Limit in DataSheet */
};
static const u8 W83792D_REG_IN_MIN[9] = {
0x2C, /* Vcore A Low Limit in DataSheet */
0x2E, /* Vcore B Low Limit in DataSheet */
0x30, /* VIN0 Low Limit in DataSheet */
0x32, /* VIN1 Low Limit in DataSheet */
0x34, /* VIN2 Low Limit in DataSheet */
0x36, /* VIN3 Low Limit in DataSheet */
0x38, /* 5VCC Low Limit in DataSheet */
0xB5, /* 5VSB Low Limit in DataSheet */
0xB7 /* VBAT Low Limit in DataSheet */
};
static const u8 W83792D_REG_FAN[7] = {
0x28, /* FAN 1 Count in DataSheet */
0x29, /* FAN 2 Count in DataSheet */
0x2A, /* FAN 3 Count in DataSheet */
0xB8, /* FAN 4 Count in DataSheet */
0xB9, /* FAN 5 Count in DataSheet */
0xBA, /* FAN 6 Count in DataSheet */
0xBE /* FAN 7 Count in DataSheet */
};
static const u8 W83792D_REG_FAN_MIN[7] = {
0x3B, /* FAN 1 Count Low Limit in DataSheet */
0x3C, /* FAN 2 Count Low Limit in DataSheet */
0x3D, /* FAN 3 Count Low Limit in DataSheet */
0xBB, /* FAN 4 Count Low Limit in DataSheet */
0xBC, /* FAN 5 Count Low Limit in DataSheet */
0xBD, /* FAN 6 Count Low Limit in DataSheet */
0xBF /* FAN 7 Count Low Limit in DataSheet */
};
#define W83792D_REG_FAN_CFG 0x84 /* FAN Configuration in DataSheet */
static const u8 W83792D_REG_FAN_DIV[4] = {
0x47, /* contains FAN2 and FAN1 Divisor */
0x5B, /* contains FAN4 and FAN3 Divisor */
0x5C, /* contains FAN6 and FAN5 Divisor */
0x9E /* contains FAN7 Divisor. */
};
static const u8 W83792D_REG_PWM[7] = {
0x81, /* FAN 1 Duty Cycle, used to control the fan */
0x83, /* FAN 2 Duty Cycle, used to control the fan */
0x94, /* FAN 3 Duty Cycle, used to control the fan */
0xA3, /* FAN 4 Duty Cycle, used to control the fan */
0xA4, /* FAN 5 Duty Cycle, used to control the fan */
0xA5, /* FAN 6 Duty Cycle, used to control the fan */
0xA6 /* FAN 7 Duty Cycle, used to control the fan */
};
#define W83792D_REG_BANK 0x4E
#define W83792D_REG_TEMP2_CONFIG 0xC2
#define W83792D_REG_TEMP3_CONFIG 0xCA
static const u8 W83792D_REG_TEMP1[3] = {
0x27, /* TEMP 1 in DataSheet */
0x39, /* TEMP 1 Over in DataSheet */
0x3A, /* TEMP 1 Hyst in DataSheet */
};
static const u8 W83792D_REG_TEMP_ADD[2][6] = {
{ 0xC0, /* TEMP 2 in DataSheet */
0xC1, /* TEMP 2(0.5 deg) in DataSheet */
0xC5, /* TEMP 2 Over High part in DataSheet */
0xC6, /* TEMP 2 Over Low part in DataSheet */
0xC3, /* TEMP 2 Thyst High part in DataSheet */
0xC4 }, /* TEMP 2 Thyst Low part in DataSheet */
{ 0xC8, /* TEMP 3 in DataSheet */
0xC9, /* TEMP 3(0.5 deg) in DataSheet */
0xCD, /* TEMP 3 Over High part in DataSheet */
0xCE, /* TEMP 3 Over Low part in DataSheet */
0xCB, /* TEMP 3 Thyst High part in DataSheet */
0xCC } /* TEMP 3 Thyst Low part in DataSheet */
};
static const u8 W83792D_REG_THERMAL[3] = {
0x85, /* SmartFanI: Fan1 target value */
0x86, /* SmartFanI: Fan2 target value */
0x96 /* SmartFanI: Fan3 target value */
};
static const u8 W83792D_REG_TOLERANCE[3] = {
0x87, /* (bit3-0)SmartFan Fan1 tolerance */
0x87, /* (bit7-4)SmartFan Fan2 tolerance */
0x97 /* (bit3-0)SmartFan Fan3 tolerance */
};
static const u8 W83792D_REG_POINTS[3][4] = {
{ 0x85, /* SmartFanII: Fan1 temp point 1 */
0xE3, /* SmartFanII: Fan1 temp point 2 */
0xE4, /* SmartFanII: Fan1 temp point 3 */
0xE5 }, /* SmartFanII: Fan1 temp point 4 */
{ 0x86, /* SmartFanII: Fan2 temp point 1 */
0xE6, /* SmartFanII: Fan2 temp point 2 */
0xE7, /* SmartFanII: Fan2 temp point 3 */
0xE8 }, /* SmartFanII: Fan2 temp point 4 */
{ 0x96, /* SmartFanII: Fan3 temp point 1 */
0xE9, /* SmartFanII: Fan3 temp point 2 */
0xEA, /* SmartFanII: Fan3 temp point 3 */
0xEB } /* SmartFanII: Fan3 temp point 4 */
};
static const u8 W83792D_REG_LEVELS[3][4] = {
{ 0x88, /* (bit3-0) SmartFanII: Fan1 Non-Stop */
0x88, /* (bit7-4) SmartFanII: Fan1 Level 1 */
0xE0, /* (bit7-4) SmartFanII: Fan1 Level 2 */
0xE0 }, /* (bit3-0) SmartFanII: Fan1 Level 3 */
{ 0x89, /* (bit3-0) SmartFanII: Fan2 Non-Stop */
0x89, /* (bit7-4) SmartFanII: Fan2 Level 1 */
0xE1, /* (bit7-4) SmartFanII: Fan2 Level 2 */
0xE1 }, /* (bit3-0) SmartFanII: Fan2 Level 3 */
{ 0x98, /* (bit3-0) SmartFanII: Fan3 Non-Stop */
0x98, /* (bit7-4) SmartFanII: Fan3 Level 1 */
0xE2, /* (bit7-4) SmartFanII: Fan3 Level 2 */
0xE2 } /* (bit3-0) SmartFanII: Fan3 Level 3 */
};
#define W83792D_REG_GPIO_EN 0x1A
#define W83792D_REG_CONFIG 0x40
#define W83792D_REG_VID_FANDIV 0x47
#define W83792D_REG_CHIPID 0x49
#define W83792D_REG_WCHIPID 0x58
#define W83792D_REG_CHIPMAN 0x4F
#define W83792D_REG_PIN 0x4B
#define W83792D_REG_I2C_SUBADDR 0x4A
#define W83792D_REG_ALARM1 0xA9 /* realtime status register1 */
#define W83792D_REG_ALARM2 0xAA /* realtime status register2 */
#define W83792D_REG_ALARM3 0xAB /* realtime status register3 */
#define W83792D_REG_CHASSIS 0x42 /* Bit 5: Case Open status bit */
#define W83792D_REG_CHASSIS_CLR 0x44 /* Bit 7: Case Open CLR_CHS/Reset bit */
/* control in0/in1 's limit modifiability */
#define W83792D_REG_VID_IN_B 0x17
#define W83792D_REG_VBAT 0x5D
#define W83792D_REG_I2C_ADDR 0x48
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants. Note that you should be a bit careful with which arguments
* these macros are called: arguments may be evaluated more than once.
* Fixing this is just not worth it.
*/
#define IN_FROM_REG(nr, val) (((nr) <= 1) ? ((val) * 2) : \
((((nr) == 6) || ((nr) == 7)) ? ((val) * 6) : ((val) * 4)))
#define IN_TO_REG(nr, val) (((nr) <= 1) ? ((val) / 2) : \
((((nr) == 6) || ((nr) == 7)) ? ((val) / 6) : ((val) / 4)))
static inline u8
FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
((val) == 255 ? 0 : \
1350000 / ((val) * (div))))
/* for temp1 */
#define TEMP1_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
: (val)) / 1000, 0, 0xff))
#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
/* for temp2 and temp3, because they need additional resolution */
#define TEMP_ADD_FROM_REG(val1, val2) \
((((val1) & 0x80 ? (val1)-0x100 \
: (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
#define TEMP_ADD_TO_REG_HIGH(val) \
(SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
: (val)) / 1000, 0, 0xff))
#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00)
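/*
* Example of the high/low split (illustrative register contents): a
* temp2 reading of val1 = 0x1A, val2 = 0x80 decodes to 26 * 1000 + 500
* = 26500 millidegrees; writing 26500 back stores TEMP_ADD_TO_REG_HIGH
* = 26 and TEMP_ADD_TO_REG_LOW = 0x80 (the half-degree bit).
*/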
#define DIV_FROM_REG(val) (1 << (val))
static inline u8
DIV_TO_REG(long val)
{
int i;
val = SENSORS_LIMIT(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
val >>= 1;
}
return (u8)i;
}
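/*
* Worked example of the fan conversions (numbers are illustrative): for
* a 3000 RPM limit with div = 2, FAN_TO_REG(3000, 2) = (1350000 + 3000)
* / 6000 = 225 in integer arithmetic, and FAN_FROM_REG(225, 2) =
* 1350000 / 450 = 3000 RPM. For the divisor encoding, DIV_TO_REG(8)
* returns 3 and DIV_FROM_REG(3) is 1 << 3 = 8.
*/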
struct w83792d_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* array of 2 pointers to subclients */
struct i2c_client *lm75[2];
u8 in[9]; /* Register value */
u8 in_max[9]; /* Register value */
u8 in_min[9]; /* Register value */
u16 low_bits; /* Additional resolution to voltage in6-0 */
u8 fan[7]; /* Register value */
u8 fan_min[7]; /* Register value */
u8 temp1[3]; /* current, over, thyst */
u8 temp_add[2][6]; /* Register value */
u8 fan_div[7]; /* Register encoding, shifted right */
u8 pwm[7]; /*
* We only consider the first 3 set of pwm,
* although 792 chip has 7 set of pwm.
*/
u8 pwmenable[3];
u32 alarms; /* realtime status register encoding, combined */
u8 chassis; /* Chassis status */
u8 chassis_clear; /* CLR_CHS, clear chassis intrusion detection */
u8 thermal_cruise[3]; /* Smart FanI: Fan1,2,3 target value */
u8 tolerance[3]; /* Fan1,2,3 tolerance(Smart Fan I/II) */
u8 sf2_points[3][4]; /* Smart FanII: Fan1,2,3 temperature points */
u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */
};
static int w83792d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);
#ifdef DEBUG
static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
#endif
static void w83792d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83792d_id[] = {
{ "w83792d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83792d_id);
static struct i2c_driver w83792d_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "w83792d",
},
.probe = w83792d_probe,
.remove = w83792d_remove,
.id_table = w83792d_id,
.detect = w83792d_detect,
.address_list = normal_i2c,
};
static inline long in_count_from_reg(int nr, struct w83792d_data *data)
{
/* in7 and in8 do not have low bits, but the formula still works */
return (data->in[nr] << 2) | ((data->low_bits >> (2 * nr)) & 0x03);
}
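/*
* Example (hypothetical register contents): if data->in[3] = 0xA0 (160)
* and bits 7:6 of low_bits are 10b, in_count_from_reg(3, data) returns
* (160 << 2) | 2 = 642, a 10-bit count, which IN_FROM_REG(3, 642) then
* scales to 642 * 4 = 2568 for the in3 sysfs reading (millivolts by the
* hwmon convention).
*/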
/*
* The SMBus locks itself. The Winbond W83792D chip has a bank register,
* but the driver only accesses registers in bank 0, so we don't have
* to switch banks and lock access between switches.
*/
static inline int w83792d_read_value(struct i2c_client *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static inline int
w83792d_write_value(struct i2c_client *client, u8 reg, u8 value)
{
return i2c_smbus_write_byte_data(client, reg, value);
}
/* following are the sysfs callback functions */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%ld\n",
IN_FROM_REG(nr, in_count_from_reg(nr, data)));
}
#define show_in_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr \
= to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
struct w83792d_data *data = w83792d_update_device(dev); \
return sprintf(buf, "%ld\n", \
(long)(IN_FROM_REG(nr, data->reg[nr]) * 4)); \
}
show_in_reg(in_min);
show_in_reg(in_max);
#define store_in_reg(REG, reg) \
static ssize_t store_in_##reg(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct sensor_device_attribute *sensor_attr \
= to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
struct i2c_client *client = to_i2c_client(dev); \
struct w83792d_data *data = i2c_get_clientdata(client); \
unsigned long val; \
int err = kstrtoul(buf, 10, &val); \
if (err) \
return err; \
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \
w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
\
return count; \
}
store_in_reg(MIN, min);
store_in_reg(MAX, max);
#define show_fan_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr \
= to_sensor_dev_attr(attr); \
int nr = sensor_attr->index - 1; \
struct w83792d_data *data = w83792d_update_device(dev); \
return sprintf(buf, "%d\n", \
FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
show_fan_reg(fan_min);
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
w83792d_write_value(client, W83792D_REG_FAN_MIN[nr],
data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr - 1]));
}
/*
* Note: we save and restore the fan minimum here, because its value is
* determined in part by the fan divisor. This follows the principle of
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
static ssize_t
store_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long min;
/*u8 reg;*/
u8 fan_div_reg = 0;
u8 tmp_fan_div;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
/* Save fan_min */
mutex_lock(&data->update_lock);
min = FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr]));
data->fan_div[nr] = DIV_TO_REG(val);
fan_div_reg = w83792d_read_value(client, W83792D_REG_FAN_DIV[nr >> 1]);
fan_div_reg &= (nr & 0x01) ? 0x8f : 0xf8;
tmp_fan_div = (nr & 0x01) ? (((data->fan_div[nr]) << 4) & 0x70)
: ((data->fan_div[nr]) & 0x07);
w83792d_write_value(client, W83792D_REG_FAN_DIV[nr >> 1],
fan_div_reg | tmp_fan_div);
/* Restore fan_min */
data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
w83792d_write_value(client, W83792D_REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
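/*
* Illustration of the save/restore above (hypothetical values): with
* fan_min stored as 225 at div = 2 (i.e. 3000 RPM), switching the
* divisor to 4 re-encodes the same limit as FAN_TO_REG(3000, 4) = 113,
* which reads back as 1350000 / (113 * 4) = 2986 RPM, so the
* user-visible minimum stays essentially unchanged instead of halving.
*/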
/* read/write the temperature1, includes measured value and limits */
static ssize_t show_temp1(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[nr]));
}
static ssize_t store_temp1(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp1[nr] = TEMP1_TO_REG(val);
w83792d_write_value(client, W83792D_REG_TEMP1[nr],
data->temp1[nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* read/write the temperature2-3, includes measured value and limits */
static ssize_t show_temp23(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%ld\n",
(long)TEMP_ADD_FROM_REG(data->temp_add[nr][index],
data->temp_add[nr][index+1]));
}
static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp_add[nr][index] = TEMP_ADD_TO_REG_HIGH(val);
data->temp_add[nr][index+1] = TEMP_ADD_TO_REG_LOW(val);
w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index],
data->temp_add[nr][index]);
w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index+1],
data->temp_add[nr][index+1]);
mutex_unlock(&data->update_lock);
return count;
}
/* get realtime status of all sensor items: voltage, temp, fan */
static ssize_t
show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
static ssize_t show_alarm(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", (data->alarms >> nr) & 1);
}
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", (data->pwm[nr] & 0x0f) << 4);
}
static ssize_t
show_pwmenable(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct w83792d_data *data = w83792d_update_device(dev);
long pwm_enable_tmp = 1;
switch (data->pwmenable[nr]) {
case 0:
pwm_enable_tmp = 1; /* manual mode */
break;
case 1:
pwm_enable_tmp = 3; /* thermal cruise/Smart Fan I */
break;
case 2:
pwm_enable_tmp = 2; /* Smart Fan II */
break;
}
return sprintf(buf, "%ld\n", pwm_enable_tmp);
}
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
val = SENSORS_LIMIT(val, 0, 255) >> 4;
mutex_lock(&data->update_lock);
val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
data->pwm[nr] = val;
w83792d_write_value(client, W83792D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
store_pwmenable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
u8 fan_cfg_tmp, cfg1_tmp, cfg2_tmp, cfg3_tmp, cfg4_tmp;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val < 1 || val > 3)
return -EINVAL;
mutex_lock(&data->update_lock);
switch (val) {
case 1:
data->pwmenable[nr] = 0; /* manual mode */
break;
case 2:
data->pwmenable[nr] = 2; /* Smart Fan II */
break;
case 3:
data->pwmenable[nr] = 1; /* thermal cruise/Smart Fan I */
break;
}
cfg1_tmp = data->pwmenable[0];
cfg2_tmp = (data->pwmenable[1]) << 2;
cfg3_tmp = (data->pwmenable[2]) << 4;
cfg4_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG) & 0xc0;
fan_cfg_tmp = ((cfg4_tmp | cfg3_tmp) | cfg2_tmp) | cfg1_tmp;
w83792d_write_value(client, W83792D_REG_FAN_CFG, fan_cfg_tmp);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm_mode(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->pwm[nr] >> 7);
}
static ssize_t
store_pwm_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->pwm[nr] = w83792d_read_value(client, W83792D_REG_PWM[nr]);
if (val) { /* PWM mode */
data->pwm[nr] |= 0x80;
} else { /* DC mode */
data->pwm[nr] &= 0x7f;
}
w83792d_write_value(client, W83792D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_chassis(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->chassis);
}
static ssize_t
show_regs_chassis(struct device *dev, struct device_attribute *attr,
char *buf)
{
dev_warn(dev,
"Attribute %s is deprecated, use intrusion0_alarm instead\n",
"chassis");
return show_chassis(dev, attr, buf);
}
static ssize_t
show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n", data->chassis_clear);
}
static ssize_t
store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
u8 temp1 = 0, temp2 = 0;
dev_warn(dev,
"Attribute %s is deprecated, use intrusion0_alarm instead\n",
"chassis_clear");
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->chassis_clear = SENSORS_LIMIT(val, 0, 1);
temp1 = ((data->chassis_clear) << 7) & 0x80;
temp2 = w83792d_read_value(client,
W83792D_REG_CHASSIS_CLR) & 0x7f;
w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, temp1 | temp2);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
store_chassis_clear(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
unsigned long val;
u8 reg;
if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
/* For Smart Fan I / Thermal Cruise */
static ssize_t
show_thermal_cruise(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%ld\n", (long)data->thermal_cruise[nr-1]);
}
static ssize_t
store_thermal_cruise(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
u8 target_tmp = 0, target_mask = 0;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
target_tmp = val;
target_tmp = target_tmp & 0x7f;
mutex_lock(&data->update_lock);
target_mask = w83792d_read_value(client,
W83792D_REG_THERMAL[nr]) & 0x80;
data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
w83792d_write_value(client, W83792D_REG_THERMAL[nr],
(data->thermal_cruise[nr]) | target_mask);
mutex_unlock(&data->update_lock);
return count;
}
/* For Smart Fan I/Thermal Cruise and Smart Fan II */
static ssize_t
show_tolerance(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%ld\n", (long)data->tolerance[nr-1]);
}
static ssize_t
store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
u8 tol_tmp, tol_mask;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
tol_mask = w83792d_read_value(client,
W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
tol_tmp = SENSORS_LIMIT(val, 0, 15);
tol_tmp &= 0x0f;
data->tolerance[nr] = tol_tmp;
if (nr == 1)
tol_tmp <<= 4;
w83792d_write_value(client, W83792D_REG_TOLERANCE[nr],
tol_mask | tol_tmp);
mutex_unlock(&data->update_lock);
return count;
}
/* For Smart Fan II */
static ssize_t
show_sf2_point(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%ld\n", (long)data->sf2_points[index-1][nr-1]);
}
static ssize_t
store_sf2_point(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr - 1;
int index = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
u8 mask_tmp = 0;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
mask_tmp = w83792d_read_value(client,
W83792D_REG_POINTS[index][nr]) & 0x80;
w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
mask_tmp|data->sf2_points[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf2_level(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83792d_data *data = w83792d_update_device(dev);
return sprintf(buf, "%d\n",
(((data->sf2_levels[index-1][nr]) * 100) / 15));
}
static ssize_t
store_sf2_level(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr
= to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index - 1;
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
u8 mask_tmp = 0, level_tmp = 0;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
& ((nr == 3) ? 0xf0 : 0x0f);
if (nr == 3)
level_tmp = data->sf2_levels[index][nr];
else
level_tmp = data->sf2_levels[index][nr] << 4;
w83792d_write_value(client, W83792D_REG_LEVELS[index][nr],
level_tmp | mask_tmp);
mutex_unlock(&data->update_lock);
return count;
}
static int
w83792d_detect_subclients(struct i2c_client *new_client)
{
int i, id, err;
int address = new_client->addr;
u8 val;
struct i2c_adapter *adapter = new_client->adapter;
struct w83792d_data *data = i2c_get_clientdata(new_client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
for (i = 2; i <= 3; i++) {
if (force_subclients[i] < 0x48 ||
force_subclients[i] > 0x4f) {
dev_err(&new_client->dev, "invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
err = -ENODEV;
goto ERROR_SC_0;
}
}
w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
(force_subclients[2] & 0x07) |
((force_subclients[3] & 0x07) << 4));
}
val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
if (!(val & 0x08))
data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
if (!(val & 0x80)) {
if ((data->lm75[0] != NULL) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&new_client->dev, "duplicate addresses 0x%x, "
"use force_subclient\n", data->lm75[0]->addr);
err = -ENODEV;
goto ERROR_SC_1;
}
data->lm75[1] = i2c_new_dummy(adapter,
0x48 + ((val >> 4) & 0x7));
}
return 0;
/* Undo inits in case of errors */
ERROR_SC_1:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
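/*
* Subaddress example (hypothetical register value): if
* W83792D_REG_I2C_SUBADDR reads 0x23, bit 3 and bit 7 are both clear,
* so two LM75 subclients are instantiated at 0x48 + (0x23 & 0x7) = 0x4b
* and 0x48 + ((0x23 >> 4) & 0x7) = 0x4a. Setting bit 3 or bit 7 would
* disable the first or second subclient respectively.
*/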
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 7);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 8);
static SENSOR_DEVICE_ATTR(in0_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 0);
static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 1);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 2);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 3);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 4);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 5);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 6);
static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 7);
static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
show_in_min, store_in_min, 8);
static SENSOR_DEVICE_ATTR(in0_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 0);
static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 1);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 2);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 3);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 4);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 5);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 6);
static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 7);
static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
show_in_max, store_in_max, 8);
static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
show_temp1, store_temp1, 0, 1);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp23,
store_temp23, 0, 2);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp23,
store_temp23, 1, 2);
static SENSOR_DEVICE_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR,
show_temp1, store_temp1, 0, 2);
static SENSOR_DEVICE_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 0, 4);
static SENSOR_DEVICE_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
show_temp23, store_temp23, 1, 4);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 15);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 19);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20);
static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21);
static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
show_chassis_clear, store_chassis_clear_legacy);
static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
show_chassis, store_chassis_clear);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 1);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 2);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
show_pwmenable, store_pwmenable, 3);
static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
show_pwm_mode, store_pwm_mode, 0);
static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO,
show_pwm_mode, store_pwm_mode, 1);
static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO,
show_pwm_mode, store_pwm_mode, 2);
static SENSOR_DEVICE_ATTR(tolerance1, S_IWUSR | S_IRUGO,
show_tolerance, store_tolerance, 1);
static SENSOR_DEVICE_ATTR(tolerance2, S_IWUSR | S_IRUGO,
show_tolerance, store_tolerance, 2);
static SENSOR_DEVICE_ATTR(tolerance3, S_IWUSR | S_IRUGO,
show_tolerance, store_tolerance, 3);
static SENSOR_DEVICE_ATTR(thermal_cruise1, S_IWUSR | S_IRUGO,
show_thermal_cruise, store_thermal_cruise, 1);
static SENSOR_DEVICE_ATTR(thermal_cruise2, S_IWUSR | S_IRUGO,
show_thermal_cruise, store_thermal_cruise, 2);
static SENSOR_DEVICE_ATTR(thermal_cruise3, S_IWUSR | S_IRUGO,
show_thermal_cruise, store_thermal_cruise, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point1_fan1, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 1, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan1, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 2, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan1, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 3, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan1, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 4, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point1_fan2, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 1, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan2, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 2, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan2, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 3, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan2, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 4, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point1_fan3, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 1, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan3, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 2, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan3, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 3, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan3, S_IRUGO | S_IWUSR,
show_sf2_point, store_sf2_point, 4, 3);
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan1, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 1, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan1, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 2, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan1, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 3, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan2, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 1, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan2, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 2, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan2, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 3, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan3, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 1, 3);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan3, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 2, 3);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan3, S_IRUGO | S_IWUSR,
show_sf2_level, store_sf2_level, 3, 3);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 3);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 4);
static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 5);
static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 6);
static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 7);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 3);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 4);
static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 5);
static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 6);
static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO,
show_fan_min, store_fan_min, 7);
static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 1);
static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 2);
static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 3);
static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 4);
static SENSOR_DEVICE_ATTR(fan5_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 5);
static SENSOR_DEVICE_ATTR(fan6_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 6);
static SENSOR_DEVICE_ATTR(fan7_div, S_IWUSR | S_IRUGO,
show_fan_div, store_fan_div, 7);
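/*
 * Fans 4-7 are optional on the W83792D: their pins may instead be
 * configured as GPIO, so their attributes are kept in separate groups
 * that w83792d_probe() registers only when the GPIO_EN/PIN registers
 * show the fan function is selected.
 */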
static struct attribute *w83792d_attributes_fan[4][5] = {
{
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_div.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_div.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan6_input.dev_attr.attr,
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan6_div.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
NULL
}, {
&sensor_dev_attr_fan7_input.dev_attr.attr,
&sensor_dev_attr_fan7_min.dev_attr.attr,
&sensor_dev_attr_fan7_div.dev_attr.attr,
&sensor_dev_attr_fan7_alarm.dev_attr.attr,
NULL
}
};
static const struct attribute_group w83792d_group_fan[4] = {
{ .attrs = w83792d_attributes_fan[0] },
{ .attrs = w83792d_attributes_fan[1] },
{ .attrs = w83792d_attributes_fan[2] },
{ .attrs = w83792d_attributes_fan[3] },
};
static struct attribute *w83792d_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_in8_max.dev_attr.attr,
&sensor_dev_attr_in8_min.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
&sensor_dev_attr_in8_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm2_mode.dev_attr.attr,
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
&sensor_dev_attr_pwm3_mode.dev_attr.attr,
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&dev_attr_alarms.attr,
&dev_attr_chassis.attr,
&dev_attr_chassis_clear.attr,
&dev_attr_intrusion0_alarm.attr,
&sensor_dev_attr_tolerance1.dev_attr.attr,
&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
&sensor_dev_attr_tolerance2.dev_attr.attr,
&sensor_dev_attr_thermal_cruise2.dev_attr.attr,
&sensor_dev_attr_tolerance3.dev_attr.attr,
&sensor_dev_attr_thermal_cruise3.dev_attr.attr,
&sensor_dev_attr_sf2_point1_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_point2_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_point3_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_point4_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_point1_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_point2_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_point3_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_point4_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_point1_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_point2_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_point3_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_point4_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_level1_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_level2_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_level3_fan1.dev_attr.attr,
&sensor_dev_attr_sf2_level1_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_level2_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_level3_fan2.dev_attr.attr,
&sensor_dev_attr_sf2_level1_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_level2_fan3.dev_attr.attr,
&sensor_dev_attr_sf2_level3_fan3.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group w83792d_group = {
.attrs = w83792d_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int val1, val2;
unsigned short address = client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80)
return -ENODEV;
val1 = w83792d_read_value(client, W83792D_REG_BANK);
val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
/* Check for Winbond ID if in bank 0 */
if (!(val1 & 0x07)) { /* is Bank0 */
if ((!(val1 & 0x80) && val2 != 0xa3) ||
((val1 & 0x80) && val2 != 0x5c))
return -ENODEV;
}
/*
* If this is a Winbond chip, the chip's address and the value in
* W83792D_REG_I2C_ADDR should match
*/
if (w83792d_read_value(client, W83792D_REG_I2C_ADDR) != address)
return -ENODEV;
/* Now select bank 0 and the high byte of the Vendor ID */
w83792d_write_value(client,
W83792D_REG_BANK,
(w83792d_read_value(client,
W83792D_REG_BANK) & 0x78) | 0x80);
/* Determine the chip type. */
val1 = w83792d_read_value(client, W83792D_REG_WCHIPID);
val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
if (val1 != 0x7a || val2 != 0x5c)
return -ENODEV;
strlcpy(info->type, "w83792d", I2C_NAME_SIZE);
return 0;
}
static int
w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct w83792d_data *data;
struct device *dev = &client->dev;
int i, val1, err;
data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto ERROR0;
}
i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
err = w83792d_detect_subclients(client);
if (err)
goto ERROR1;
/* Initialize the chip */
w83792d_init_client(client);
/* A few vars need to be filled upon startup */
for (i = 0; i < 7; i++) {
data->fan_min[i] = w83792d_read_value(client,
W83792D_REG_FAN_MIN[i]);
}
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &w83792d_group);
if (err)
goto ERROR3;
/*
* Read GPIO enable register to check if pins for fan 4,5 are used as
* GPIO
*/
val1 = w83792d_read_value(client, W83792D_REG_GPIO_EN);
if (!(val1 & 0x40)) {
err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[0]);
if (err)
goto exit_remove_files;
}
if (!(val1 & 0x20)) {
err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[1]);
if (err)
goto exit_remove_files;
}
val1 = w83792d_read_value(client, W83792D_REG_PIN);
if (val1 & 0x40) {
err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[2]);
if (err)
goto exit_remove_files;
}
if (val1 & 0x04) {
err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[3]);
if (err)
goto exit_remove_files;
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&dev->kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
ERROR3:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
ERROR1:
kfree(data);
ERROR0:
return err;
}
static int
w83792d_remove(struct i2c_client *client)
{
struct w83792d_data *data = i2c_get_clientdata(client);
int i;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
kfree(data);
return 0;
}
static void
w83792d_init_client(struct i2c_client *client)
{
u8 temp2_cfg, temp3_cfg, vid_in_b;
if (init)
w83792d_write_value(client, W83792D_REG_CONFIG, 0x80);
/*
* Clear the bit6 of W83792D_REG_VID_IN_B(set it into 0):
* W83792D_REG_VID_IN_B bit6 = 0: the high/low limit of
* vin0/vin1 can be modified by user;
* W83792D_REG_VID_IN_B bit6 = 1: the high/low limit of
* vin0/vin1 auto-updated, can NOT be modified by user.
*/
vid_in_b = w83792d_read_value(client, W83792D_REG_VID_IN_B);
w83792d_write_value(client, W83792D_REG_VID_IN_B,
vid_in_b & 0xbf);
temp2_cfg = w83792d_read_value(client, W83792D_REG_TEMP2_CONFIG);
temp3_cfg = w83792d_read_value(client, W83792D_REG_TEMP3_CONFIG);
w83792d_write_value(client, W83792D_REG_TEMP2_CONFIG,
temp2_cfg & 0xe6);
w83792d_write_value(client, W83792D_REG_TEMP3_CONFIG,
temp3_cfg & 0xe6);
/* Start monitoring */
w83792d_write_value(client, W83792D_REG_CONFIG,
(w83792d_read_value(client,
W83792D_REG_CONFIG) & 0xf7)
| 0x01);
}
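/*
 * Re-read the full register set into the cached image, at most once
 * every three seconds (or when the cache has never been filled).
 */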
static struct w83792d_data *w83792d_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83792d_data *data = i2c_get_clientdata(client);
int i, j;
u8 reg_array_tmp[4], reg_tmp;
mutex_lock(&data->update_lock);
if (time_after
(jiffies - data->last_updated, (unsigned long) (HZ * 3))
|| time_before(jiffies, data->last_updated) || !data->valid) {
dev_dbg(dev, "Starting device update\n");
/* Update the voltages measured value and limits */
for (i = 0; i < 9; i++) {
data->in[i] = w83792d_read_value(client,
W83792D_REG_IN[i]);
data->in_max[i] = w83792d_read_value(client,
W83792D_REG_IN_MAX[i]);
data->in_min[i] = w83792d_read_value(client,
W83792D_REG_IN_MIN[i]);
}
data->low_bits = w83792d_read_value(client,
W83792D_REG_LOW_BITS1) +
(w83792d_read_value(client,
W83792D_REG_LOW_BITS2) << 8);
for (i = 0; i < 7; i++) {
/* Update the Fan measured value and limits */
data->fan[i] = w83792d_read_value(client,
W83792D_REG_FAN[i]);
data->fan_min[i] = w83792d_read_value(client,
W83792D_REG_FAN_MIN[i]);
/* Update the PWM/DC Value and PWM/DC flag */
data->pwm[i] = w83792d_read_value(client,
W83792D_REG_PWM[i]);
}
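/* FAN_CFG packs the three PWM enable modes as consecutive 2-bit fields. */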
reg_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG);
data->pwmenable[0] = reg_tmp & 0x03;
data->pwmenable[1] = (reg_tmp>>2) & 0x03;
data->pwmenable[2] = (reg_tmp>>4) & 0x03;
for (i = 0; i < 3; i++) {
data->temp1[i] = w83792d_read_value(client,
W83792D_REG_TEMP1[i]);
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 6; j++) {
data->temp_add[i][j] = w83792d_read_value(
client, W83792D_REG_TEMP_ADD[i][j]);
}
}
/* Update the Fan Divisor */
for (i = 0; i < 4; i++) {
reg_array_tmp[i] = w83792d_read_value(client,
W83792D_REG_FAN_DIV[i]);
}
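/*
 * Each FAN_DIV register carries two 3-bit divisors in its low and high
 * nibbles; the last register only uses the low nibble (fan7).
 */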
data->fan_div[0] = reg_array_tmp[0] & 0x07;
data->fan_div[1] = (reg_array_tmp[0] >> 4) & 0x07;
data->fan_div[2] = reg_array_tmp[1] & 0x07;
data->fan_div[3] = (reg_array_tmp[1] >> 4) & 0x07;
data->fan_div[4] = reg_array_tmp[2] & 0x07;
data->fan_div[5] = (reg_array_tmp[2] >> 4) & 0x07;
data->fan_div[6] = reg_array_tmp[3] & 0x07;
/* Update the realtime status */
data->alarms = w83792d_read_value(client, W83792D_REG_ALARM1) +
(w83792d_read_value(client, W83792D_REG_ALARM2) << 8) +
(w83792d_read_value(client, W83792D_REG_ALARM3) << 16);
/* Update the CaseOpen status and its CLR_CHS bit. */
data->chassis = (w83792d_read_value(client,
W83792D_REG_CHASSIS) >> 5) & 0x01;
data->chassis_clear = (w83792d_read_value(client,
W83792D_REG_CHASSIS_CLR) >> 7) & 0x01;
/* Update Thermal Cruise/Smart Fan I target value */
for (i = 0; i < 3; i++) {
data->thermal_cruise[i] =
w83792d_read_value(client,
W83792D_REG_THERMAL[i]) & 0x7f;
}
/* Update Smart Fan I/II tolerance */
reg_tmp = w83792d_read_value(client, W83792D_REG_TOLERANCE[0]);
data->tolerance[0] = reg_tmp & 0x0f;
data->tolerance[1] = (reg_tmp >> 4) & 0x0f;
data->tolerance[2] = w83792d_read_value(client,
W83792D_REG_TOLERANCE[2]) & 0x0f;
/* Update Smart Fan II temperature points */
for (i = 0; i < 3; i++) {
for (j = 0; j < 4; j++) {
data->sf2_points[i][j]
= w83792d_read_value(client,
W83792D_REG_POINTS[i][j]) & 0x7f;
}
}
/* Update Smart Fan II duty cycle levels */
for (i = 0; i < 3; i++) {
reg_tmp = w83792d_read_value(client,
W83792D_REG_LEVELS[i][0]);
data->sf2_levels[i][0] = reg_tmp & 0x0f;
data->sf2_levels[i][1] = (reg_tmp >> 4) & 0x0f;
reg_tmp = w83792d_read_value(client,
W83792D_REG_LEVELS[i][2]);
data->sf2_levels[i][2] = (reg_tmp >> 4) & 0x0f;
data->sf2_levels[i][3] = reg_tmp & 0x0f;
}
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
#ifdef DEBUG
w83792d_print_debug(data, dev);
#endif
return data;
}
#ifdef DEBUG
static void w83792d_print_debug(struct w83792d_data *data, struct device *dev)
{
int i = 0, j = 0;
dev_dbg(dev, "==========The following is the debug message...========\n");
dev_dbg(dev, "9 sets of Voltages: =====>\n");
for (i = 0; i < 9; i++) {
dev_dbg(dev, "vin[%d] is: 0x%x\n", i, data->in[i]);
dev_dbg(dev, "vin[%d] max is: 0x%x\n", i, data->in_max[i]);
dev_dbg(dev, "vin[%d] min is: 0x%x\n", i, data->in_min[i]);
}
dev_dbg(dev, "Low Bit1 is: 0x%x\n", data->low_bits & 0xff);
dev_dbg(dev, "Low Bit2 is: 0x%x\n", data->low_bits >> 8);
dev_dbg(dev, "7 sets of Fan Counts and Duty Cycles: =====>\n");
for (i = 0; i < 7; i++) {
dev_dbg(dev, "fan[%d] is: 0x%x\n", i, data->fan[i]);
dev_dbg(dev, "fan[%d] min is: 0x%x\n", i, data->fan_min[i]);
dev_dbg(dev, "pwm[%d] is: 0x%x\n", i, data->pwm[i]);
}
dev_dbg(dev, "3 sets of Temperatures: =====>\n");
for (i = 0; i < 3; i++)
dev_dbg(dev, "temp1[%d] is: 0x%x\n", i, data->temp1[i]);
for (i = 0; i < 2; i++) {
for (j = 0; j < 6; j++) {
dev_dbg(dev, "temp_add[%d][%d] is: 0x%x\n", i, j,
data->temp_add[i][j]);
}
}
for (i = 0; i < 7; i++)
dev_dbg(dev, "fan_div[%d] is: 0x%x\n", i, data->fan_div[i]);
dev_dbg(dev, "==========End of the debug message...================\n");
dev_dbg(dev, "\n");
}
#endif
module_i2c_driver(w83792d_driver);
MODULE_AUTHOR("Chunhao Huang @ Winbond <DZShen@Winbond.com.tw>");
MODULE_DESCRIPTION("W83792AD/D driver for linux-2.6");
MODULE_LICENSE("GPL");
| gpl-2.0 |
itgb/opCloudRouter | qca/src/linux/drivers/isdn/hisax/isurf.c | 4857 | 7830 | /* $Id: isurf.c,v 1.12.2.4 2004/01/13 21:46:03 keil Exp $
*
* low level stuff for Siemens I-Surf/I-Talk cards
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "isar.h"
#include "isdnl1.h"
#include <linux/isapnp.h>
static const char *ISurf_revision = "$Revision: 1.12.2.4 $";
#define byteout(addr, val) outb(val, addr)
#define bytein(addr) inb(addr)
#define ISURF_ISAR_RESET 1
#define ISURF_ISAC_RESET 2
#define ISURF_ISAR_EA 4
#define ISURF_ARCOFI_RESET 8
#define ISURF_RESET (ISURF_ISAR_RESET | ISURF_ISAC_RESET | ISURF_ARCOFI_RESET)
#define ISURF_ISAR_OFFSET 0
#define ISURF_ISAC_OFFSET 0x100
#define ISURF_IOMEM_SIZE 0x400
/* Interface functions */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
return (readb(cs->hw.isurf.isac + offset));
}
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
writeb(value, cs->hw.isurf.isac + offset); mb();
}
static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
register int i;
for (i = 0; i < size; i++)
data[i] = readb(cs->hw.isurf.isac);
}
static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
register int i;
for (i = 0; i < size; i++) {
writeb(data[i], cs->hw.isurf.isac); mb();
}
}
/* ISAR access routines
* mode = 0 access with IRQ on
* mode = 1 access with IRQ off
* mode = 2 access with IRQ off and using last offset
*/
static u_char
ReadISAR(struct IsdnCardState *cs, int mode, u_char offset)
{
return (readb(cs->hw.isurf.isar + offset));
}
static void
WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
{
writeb(value, cs->hw.isurf.isar + offset); mb();
}
static irqreturn_t
isurf_interrupt(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_char val;
int cnt = 5;
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
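/*
 * Service the ISAR and ISAC interrupt sources in turn and loop (at most
 * cnt times) while either chip still reports a pending interrupt.
 */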
val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
Start_ISAR:
if (val & ISAR_IRQSTA)
isar_int_main(cs);
val = readb(cs->hw.isurf.isac + ISAC_ISTA);
Start_ISAC:
if (val)
isac_interrupt(cs, val);
val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
if ((val & ISAR_IRQSTA) && --cnt) {
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "ISAR IntStat after IntRoutine");
goto Start_ISAR;
}
val = readb(cs->hw.isurf.isac + ISAC_ISTA);
if (val && --cnt) {
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "ISAC IntStat after IntRoutine");
goto Start_ISAC;
}
if (!cnt)
printk(KERN_WARNING "ISurf IRQ LOOP\n");
writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
writeb(0xFF, cs->hw.isurf.isac + ISAC_MASK); mb();
writeb(0, cs->hw.isurf.isac + ISAC_MASK); mb();
writeb(ISAR_IRQMSK, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
static void
release_io_isurf(struct IsdnCardState *cs)
{
release_region(cs->hw.isurf.reset, 1);
iounmap(cs->hw.isurf.isar);
release_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
}
static void
reset_isurf(struct IsdnCardState *cs, u_char chips)
{
printk(KERN_INFO "ISurf: resetting card\n");
byteout(cs->hw.isurf.reset, chips); /* Reset On */
mdelay(10);
byteout(cs->hw.isurf.reset, ISURF_ISAR_EA); /* Reset Off */
mdelay(10);
}
static int
ISurf_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
u_long flags;
switch (mt) {
case CARD_RESET:
spin_lock_irqsave(&cs->lock, flags);
reset_isurf(cs, ISURF_RESET);
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_RELEASE:
release_io_isurf(cs);
return (0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
reset_isurf(cs, ISURF_RESET);
clear_pending_isac_ints(cs);
writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
initisac(cs);
initisar(cs);
/* Reenable ISAC IRQ */
cs->writeisac(cs, ISAC_MASK, 0);
/* RESET Receiver and Transmitter */
cs->writeisac(cs, ISAC_CMDR, 0x41);
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_TEST:
return (0);
}
return (0);
}
static int
isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
int ret;
u_long flags;
if ((ic->command == ISDN_CMD_IOCTL) && (ic->arg == 9)) {
ret = isar_auxcmd(cs, ic);
spin_lock_irqsave(&cs->lock, flags);
if (!ret) {
reset_isurf(cs, ISURF_ISAR_EA | ISURF_ISAC_RESET |
ISURF_ARCOFI_RESET);
initisac(cs);
cs->writeisac(cs, ISAC_MASK, 0);
cs->writeisac(cs, ISAC_CMDR, 0x41);
}
spin_unlock_irqrestore(&cs->lock, flags);
return (ret);
}
return (isar_auxcmd(cs, ic));
}
#ifdef __ISAPNP__
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
int __devinit
setup_isurf(struct IsdnCard *card)
{
int ver;
struct IsdnCardState *cs = card->cs;
char tmp[64];
strcpy(tmp, ISurf_revision);
printk(KERN_INFO "HiSax: ISurf driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_ISURF)
return (0);
if (card->para[1] && card->para[2]) {
cs->hw.isurf.reset = card->para[1];
cs->hw.isurf.phymem = card->para[2];
cs->irq = card->para[0];
} else {
#ifdef __ISAPNP__
if (isapnp_present()) {
struct pnp_dev *pnp_d = NULL;
int err;
cs->subtyp = 0;
if ((pnp_c = pnp_find_card(
ISAPNP_VENDOR('S', 'I', 'E'),
ISAPNP_FUNCTION(0x0010), pnp_c))) {
if (!(pnp_d = pnp_find_dev(pnp_c,
ISAPNP_VENDOR('S', 'I', 'E'),
ISAPNP_FUNCTION(0x0010), pnp_d))) {
printk(KERN_ERR "ISurfPnP: PnP error card found, no device\n");
return (0);
}
pnp_disable_dev(pnp_d);
err = pnp_activate_dev(pnp_d);
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
pnp_disable_dev(pnp_d);
return (0);
}
} else {
printk(KERN_INFO "ISurfPnP: no ISAPnP card found\n");
return (0);
}
} else {
printk(KERN_INFO "ISurfPnP: no ISAPnP bus found\n");
return (0);
}
#else
printk(KERN_WARNING "HiSax: Siemens I-Surf port/mem not set\n");
return (0);
#endif
}
if (!request_region(cs->hw.isurf.reset, 1, "isurf isdn")) {
printk(KERN_WARNING
"HiSax: Siemens I-Surf config port %x already in use\n",
cs->hw.isurf.reset);
return (0);
}
if (!request_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE, "isurf iomem")) {
printk(KERN_WARNING "HiSax: Siemens I-Surf memory region "
"%lx-%lx already in use\n",
cs->hw.isurf.phymem,
cs->hw.isurf.phymem + ISURF_IOMEM_SIZE);
release_region(cs->hw.isurf.reset, 1);
return (0);
}
cs->hw.isurf.isar = ioremap(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
cs->hw.isurf.isac = cs->hw.isurf.isar + ISURF_ISAC_OFFSET;
printk(KERN_INFO
"ISurf: defined at 0x%x 0x%lx IRQ %d\n",
cs->hw.isurf.reset,
cs->hw.isurf.phymem,
cs->irq);
setup_isac(cs);
cs->cardmsg = &ISurf_card_msg;
cs->irq_func = &isurf_interrupt;
cs->auxcmd = &isurf_auxcmd;
cs->readisac = &ReadISAC;
cs->writeisac = &WriteISAC;
cs->readisacfifo = &ReadISACfifo;
cs->writeisacfifo = &WriteISACfifo;
cs->bcs[0].hw.isar.reg = &cs->hw.isurf.isar_r;
cs->bcs[1].hw.isar.reg = &cs->hw.isurf.isar_r;
test_and_set_bit(HW_ISAR, &cs->HW_Flags);
ISACVersion(cs, "ISurf:");
cs->BC_Read_Reg = &ReadISAR;
cs->BC_Write_Reg = &WriteISAR;
cs->BC_Send_Data = &isar_fill_fifo;
ver = ISARVersion(cs, "ISurf:");
if (ver < 0) {
printk(KERN_WARNING
"ISurf: wrong ISAR version (ret = %d)\n", ver);
release_io_isurf(cs);
return (0);
}
return (1);
}
| gpl-2.0 |
DirtyDevs/android_kernel_motorola_ghost | sound/isa/wss/wss_lib.c | 5113 | 65871 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Routines for control of CS4231(A)/CS4232/InterWave & compatible chips
*
* Bugs:
* - recording sometimes breaks playback with the WSS portion of the
* Yamaha OPL3-SA3 chip
* - CS4231 (GUS MAX) - still trouble with occasional noises
* - broken initialization?
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/wss.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of CS4231(A)/CS4232/InterWave & compatible chips");
MODULE_LICENSE("GPL");
#if 0
#define SNDRV_DEBUG_MCE
#endif
/*
* Some variables
*/
static unsigned char freq_bits[14] = {
/* 5510 */ 0x00 | CS4231_XTAL2,
/* 6620 */ 0x0E | CS4231_XTAL2,
/* 8000 */ 0x00 | CS4231_XTAL1,
/* 9600 */ 0x0E | CS4231_XTAL1,
/* 11025 */ 0x02 | CS4231_XTAL2,
/* 16000 */ 0x02 | CS4231_XTAL1,
/* 18900 */ 0x04 | CS4231_XTAL2,
/* 22050 */ 0x06 | CS4231_XTAL2,
/* 27042 */ 0x04 | CS4231_XTAL1,
/* 32000 */ 0x06 | CS4231_XTAL1,
/* 33075 */ 0x0C | CS4231_XTAL2,
/* 37800 */ 0x08 | CS4231_XTAL2,
/* 44100 */ 0x0A | CS4231_XTAL2,
/* 48000 */ 0x0C | CS4231_XTAL1
};
static unsigned int rates[14] = {
5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050,
27042, 32000, 33075, 37800, 44100, 48000
};
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
static int snd_wss_xrate(struct snd_pcm_runtime *runtime)
{
return snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
}
static unsigned char snd_wss_original_image[32] =
{
0x00, /* 00/00 - lic */
0x00, /* 01/01 - ric */
0x9f, /* 02/02 - la1ic */
0x9f, /* 03/03 - ra1ic */
0x9f, /* 04/04 - la2ic */
0x9f, /* 05/05 - ra2ic */
0xbf, /* 06/06 - loc */
0xbf, /* 07/07 - roc */
0x20, /* 08/08 - pdfr */
CS4231_AUTOCALIB, /* 09/09 - ic */
0x00, /* 0a/10 - pc */
0x00, /* 0b/11 - ti */
CS4231_MODE2, /* 0c/12 - mi */
0xfc, /* 0d/13 - lbc */
0x00, /* 0e/14 - pbru */
0x00, /* 0f/15 - pbrl */
0x80, /* 10/16 - afei */
0x01, /* 11/17 - afeii */
0x9f, /* 12/18 - llic */
0x9f, /* 13/19 - rlic */
0x00, /* 14/20 - tlb */
0x00, /* 15/21 - thb */
0x00, /* 16/22 - la3mic/reserved */
0x00, /* 17/23 - ra3mic/reserved */
0x00, /* 18/24 - afs */
0x00, /* 19/25 - lamoc/version */
0xcf, /* 1a/26 - mioc */
0x00, /* 1b/27 - ramoc/reserved */
0x20, /* 1c/28 - cdfr */
0x00, /* 1d/29 - res4 */
0x00, /* 1e/30 - cbru */
0x00, /* 1f/31 - cbrl */
};
static unsigned char snd_opti93x_original_image[32] =
{
0x00, /* 00/00 - l_mixout_outctrl */
0x00, /* 01/01 - r_mixout_outctrl */
0x88, /* 02/02 - l_cd_inctrl */
0x88, /* 03/03 - r_cd_inctrl */
0x88, /* 04/04 - l_a1/fm_inctrl */
0x88, /* 05/05 - r_a1/fm_inctrl */
0x80, /* 06/06 - l_dac_inctrl */
0x80, /* 07/07 - r_dac_inctrl */
0x00, /* 08/08 - ply_dataform_reg */
0x00, /* 09/09 - if_conf */
0x00, /* 0a/10 - pin_ctrl */
0x00, /* 0b/11 - err_init_reg */
0x0a, /* 0c/12 - id_reg */
0x00, /* 0d/13 - reserved */
0x00, /* 0e/14 - ply_upcount_reg */
0x00, /* 0f/15 - ply_lowcount_reg */
0x88, /* 10/16 - reserved/l_a1_inctrl */
0x88, /* 11/17 - reserved/r_a1_inctrl */
0x88, /* 12/18 - l_line_inctrl */
0x88, /* 13/19 - r_line_inctrl */
0x88, /* 14/20 - l_mic_inctrl */
0x88, /* 15/21 - r_mic_inctrl */
0x80, /* 16/22 - l_out_outctrl */
0x80, /* 17/23 - r_out_outctrl */
0x00, /* 18/24 - reserved */
0x00, /* 19/25 - reserved */
0x00, /* 1a/26 - reserved */
0x00, /* 1b/27 - reserved */
0x00, /* 1c/28 - cap_dataform_reg */
0x00, /* 1d/29 - reserved */
0x00, /* 1e/30 - cap_upcount_reg */
0x00 /* 1f/31 - cap_lowcount_reg */
};
/*
* Basic I/O functions
*/
static inline void wss_outb(struct snd_wss *chip, u8 offset, u8 val)
{
outb(val, chip->port + offset);
}
static inline u8 wss_inb(struct snd_wss *chip, u8 offset)
{
return inb(chip->port + offset);
}
static void snd_wss_wait(struct snd_wss *chip)
{
int timeout;
for (timeout = 250;
timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT);
timeout--)
udelay(100);
}
static void snd_wss_dout(struct snd_wss *chip, unsigned char reg,
unsigned char value)
{
int timeout;
for (timeout = 250;
timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT);
timeout--)
udelay(10);
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg);
wss_outb(chip, CS4231P(REG), value);
mb();
}
void snd_wss_out(struct snd_wss *chip, unsigned char reg, unsigned char value)
{
snd_wss_wait(chip);
#ifdef CONFIG_SND_DEBUG
if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
snd_printk(KERN_DEBUG "out: auto calibration time out "
"- reg = 0x%x, value = 0x%x\n", reg, value);
#endif
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg);
wss_outb(chip, CS4231P(REG), value);
chip->image[reg] = value;
mb();
snd_printdd("codec out - reg 0x%x = 0x%x\n",
chip->mce_bit | reg, value);
}
EXPORT_SYMBOL(snd_wss_out);
unsigned char snd_wss_in(struct snd_wss *chip, unsigned char reg)
{
snd_wss_wait(chip);
#ifdef CONFIG_SND_DEBUG
if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
snd_printk(KERN_DEBUG "in: auto calibration time out "
"- reg = 0x%x\n", reg);
#endif
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | reg);
mb();
return wss_inb(chip, CS4231P(REG));
}
EXPORT_SYMBOL(snd_wss_in);
void snd_cs4236_ext_out(struct snd_wss *chip, unsigned char reg,
unsigned char val)
{
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | 0x17);
wss_outb(chip, CS4231P(REG),
reg | (chip->image[CS4236_EXT_REG] & 0x01));
wss_outb(chip, CS4231P(REG), val);
chip->eimage[CS4236_REG(reg)] = val;
#if 0
printk(KERN_DEBUG "ext out : reg = 0x%x, val = 0x%x\n", reg, val);
#endif
}
EXPORT_SYMBOL(snd_cs4236_ext_out);
unsigned char snd_cs4236_ext_in(struct snd_wss *chip, unsigned char reg)
{
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | 0x17);
wss_outb(chip, CS4231P(REG),
reg | (chip->image[CS4236_EXT_REG] & 0x01));
#if 1
return wss_inb(chip, CS4231P(REG));
#else
{
unsigned char res;
res = wss_inb(chip, CS4231P(REG));
printk(KERN_DEBUG "ext in : reg = 0x%x, val = 0x%x\n",
reg, res);
return res;
}
#endif
}
EXPORT_SYMBOL(snd_cs4236_ext_in);
#if 0
static void snd_wss_debug(struct snd_wss *chip)
{
printk(KERN_DEBUG
"CS4231 REGS: INDEX = 0x%02x "
" STATUS = 0x%02x\n",
wss_inb(chip, CS4231P(REGSEL)),
wss_inb(chip, CS4231P(STATUS)));
printk(KERN_DEBUG
" 0x00: left input = 0x%02x "
" 0x10: alt 1 (CFIG 2) = 0x%02x\n",
snd_wss_in(chip, 0x00),
snd_wss_in(chip, 0x10));
printk(KERN_DEBUG
" 0x01: right input = 0x%02x "
" 0x11: alt 2 (CFIG 3) = 0x%02x\n",
snd_wss_in(chip, 0x01),
snd_wss_in(chip, 0x11));
printk(KERN_DEBUG
" 0x02: GF1 left input = 0x%02x "
" 0x12: left line in = 0x%02x\n",
snd_wss_in(chip, 0x02),
snd_wss_in(chip, 0x12));
printk(KERN_DEBUG
" 0x03: GF1 right input = 0x%02x "
" 0x13: right line in = 0x%02x\n",
snd_wss_in(chip, 0x03),
snd_wss_in(chip, 0x13));
printk(KERN_DEBUG
" 0x04: CD left input = 0x%02x "
" 0x14: timer low = 0x%02x\n",
snd_wss_in(chip, 0x04),
snd_wss_in(chip, 0x14));
printk(KERN_DEBUG
" 0x05: CD right input = 0x%02x "
" 0x15: timer high = 0x%02x\n",
snd_wss_in(chip, 0x05),
snd_wss_in(chip, 0x15));
printk(KERN_DEBUG
" 0x06: left output = 0x%02x "
" 0x16: left MIC (PnP) = 0x%02x\n",
snd_wss_in(chip, 0x06),
snd_wss_in(chip, 0x16));
printk(KERN_DEBUG
" 0x07: right output = 0x%02x "
" 0x17: right MIC (PnP) = 0x%02x\n",
snd_wss_in(chip, 0x07),
snd_wss_in(chip, 0x17));
printk(KERN_DEBUG
" 0x08: playback format = 0x%02x "
" 0x18: IRQ status = 0x%02x\n",
snd_wss_in(chip, 0x08),
snd_wss_in(chip, 0x18));
printk(KERN_DEBUG
" 0x09: iface (CFIG 1) = 0x%02x "
" 0x19: left line out = 0x%02x\n",
snd_wss_in(chip, 0x09),
snd_wss_in(chip, 0x19));
printk(KERN_DEBUG
" 0x0a: pin control = 0x%02x "
" 0x1a: mono control = 0x%02x\n",
snd_wss_in(chip, 0x0a),
snd_wss_in(chip, 0x1a));
printk(KERN_DEBUG
" 0x0b: init & status = 0x%02x "
" 0x1b: right line out = 0x%02x\n",
snd_wss_in(chip, 0x0b),
snd_wss_in(chip, 0x1b));
printk(KERN_DEBUG
" 0x0c: revision & mode = 0x%02x "
" 0x1c: record format = 0x%02x\n",
snd_wss_in(chip, 0x0c),
snd_wss_in(chip, 0x1c));
printk(KERN_DEBUG
" 0x0d: loopback = 0x%02x "
" 0x1d: var freq (PnP) = 0x%02x\n",
snd_wss_in(chip, 0x0d),
snd_wss_in(chip, 0x1d));
printk(KERN_DEBUG
" 0x0e: ply upr count = 0x%02x "
" 0x1e: ply lwr count = 0x%02x\n",
snd_wss_in(chip, 0x0e),
snd_wss_in(chip, 0x1e));
printk(KERN_DEBUG
" 0x0f: rec upr count = 0x%02x "
" 0x1f: rec lwr count = 0x%02x\n",
snd_wss_in(chip, 0x0f),
snd_wss_in(chip, 0x1f));
}
#endif
/*
* CS4231 detection / MCE routines
*/
static void snd_wss_busy_wait(struct snd_wss *chip)
{
int timeout;
/* huh.. looks like this sequence is proper for CS4231A chip (GUS MAX) */
for (timeout = 5; timeout > 0; timeout--)
wss_inb(chip, CS4231P(REGSEL));
/* end of cleanup sequence */
for (timeout = 25000;
timeout > 0 && (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT);
timeout--)
udelay(10);
}
void snd_wss_mce_up(struct snd_wss *chip)
{
unsigned long flags;
int timeout;
snd_wss_wait(chip);
#ifdef CONFIG_SND_DEBUG
if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
snd_printk(KERN_DEBUG
"mce_up - auto calibration time out (0)\n");
#endif
spin_lock_irqsave(&chip->reg_lock, flags);
chip->mce_bit |= CS4231_MCE;
timeout = wss_inb(chip, CS4231P(REGSEL));
if (timeout == 0x80)
snd_printk(KERN_DEBUG "mce_up [0x%lx]: "
"serious init problem - codec still busy\n",
chip->port);
if (!(timeout & CS4231_MCE))
wss_outb(chip, CS4231P(REGSEL),
chip->mce_bit | (timeout & 0x1f));
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
EXPORT_SYMBOL(snd_wss_mce_up);
void snd_wss_mce_down(struct snd_wss *chip)
{
unsigned long flags;
unsigned long end_time;
int timeout;
int hw_mask = WSS_HW_CS4231_MASK | WSS_HW_CS4232_MASK | WSS_HW_AD1848;
snd_wss_busy_wait(chip);
#ifdef CONFIG_SND_DEBUG
if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
snd_printk(KERN_DEBUG "mce_down [0x%lx] - "
"auto calibration time out (0)\n",
(long)CS4231P(REGSEL));
#endif
spin_lock_irqsave(&chip->reg_lock, flags);
chip->mce_bit &= ~CS4231_MCE;
timeout = wss_inb(chip, CS4231P(REGSEL));
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | (timeout & 0x1f));
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (timeout == 0x80)
snd_printk(KERN_DEBUG "mce_down [0x%lx]: "
"serious init problem - codec still busy\n",
chip->port);
if ((timeout & CS4231_MCE) == 0 || !(chip->hardware & hw_mask))
return;
/*
* Wait for (possible -- during init auto-calibration may not be set)
* calibration process to start. Needs up to 5 sample periods on AD1848
* which at the slowest possible rate of 5.5125 kHz means 907 us.
*/
msleep(1);
snd_printdd("(1) jiffies = %lu\n", jiffies);
/* check condition up to 250 ms */
end_time = jiffies + msecs_to_jiffies(250);
while (snd_wss_in(chip, CS4231_TEST_INIT) &
CS4231_CALIB_IN_PROGRESS) {
if (time_after(jiffies, end_time)) {
snd_printk(KERN_ERR "mce_down - "
"auto calibration time out (2)\n");
return;
}
msleep(1);
}
snd_printdd("(2) jiffies = %lu\n", jiffies);
/* check condition up to 100 ms */
end_time = jiffies + msecs_to_jiffies(100);
while (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) {
if (time_after(jiffies, end_time)) {
snd_printk(KERN_ERR "mce_down - auto calibration time out (3)\n");
return;
}
msleep(1);
}
snd_printdd("(3) jiffies = %lu\n", jiffies);
snd_printd("mce_down - exit = 0x%x\n", wss_inb(chip, CS4231P(REGSEL)));
}
EXPORT_SYMBOL(snd_wss_mce_down);
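/*
 * Translate a DMA buffer size in bytes into the sample count the codec
 * expects: 16-bit formats halve it, IMA ADPCM quarters it (and returns
 * early), and stereo halves it once more.
 */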
static unsigned int snd_wss_get_count(unsigned char format, unsigned int size)
{
switch (format & 0xe0) {
case CS4231_LINEAR_16:
case CS4231_LINEAR_16_BIG:
size >>= 1;
break;
case CS4231_ADPCM_16:
return size >> 2;
}
if (format & CS4231_STEREO)
size >>= 1;
return size;
}
static int snd_wss_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
int result = 0;
unsigned int what;
struct snd_pcm_substream *s;
int do_start;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
do_start = 1; break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
do_start = 0; break;
default:
return -EINVAL;
}
what = 0;
snd_pcm_group_for_each_entry(s, substream) {
if (s == chip->playback_substream) {
what |= CS4231_PLAYBACK_ENABLE;
snd_pcm_trigger_done(s, substream);
} else if (s == chip->capture_substream) {
what |= CS4231_RECORD_ENABLE;
snd_pcm_trigger_done(s, substream);
}
}
spin_lock(&chip->reg_lock);
if (do_start) {
chip->image[CS4231_IFACE_CTRL] |= what;
if (chip->trigger)
chip->trigger(chip, what, 1);
} else {
chip->image[CS4231_IFACE_CTRL] &= ~what;
if (chip->trigger)
chip->trigger(chip, what, 0);
}
snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
spin_unlock(&chip->reg_lock);
#if 0
snd_wss_debug(chip);
#endif
return result;
}
/*
* CODEC I/O
*/
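/* Map a sample rate to its clock-divider bits; unknown rates fall back to the 48 kHz setting. */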
static unsigned char snd_wss_get_rate(unsigned int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(rates); i++)
if (rate == rates[i])
return freq_bits[i];
// snd_BUG();
return freq_bits[ARRAY_SIZE(rates) - 1];
}
static unsigned char snd_wss_get_format(struct snd_wss *chip,
int format,
int channels)
{
unsigned char rformat;
rformat = CS4231_LINEAR_8;
switch (format) {
case SNDRV_PCM_FORMAT_MU_LAW: rformat = CS4231_ULAW_8; break;
case SNDRV_PCM_FORMAT_A_LAW: rformat = CS4231_ALAW_8; break;
case SNDRV_PCM_FORMAT_S16_LE: rformat = CS4231_LINEAR_16; break;
case SNDRV_PCM_FORMAT_S16_BE: rformat = CS4231_LINEAR_16_BIG; break;
case SNDRV_PCM_FORMAT_IMA_ADPCM: rformat = CS4231_ADPCM_16; break;
}
if (channels > 1)
rformat |= CS4231_STEREO;
#if 0
snd_printk(KERN_DEBUG "get_format: 0x%x (mode=0x%x)\n", format, mode);
#endif
return rformat;
}
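/*
 * OR the mute bit (0x80) into every analog-path register while the
 * codec recalibrates, or write the cached register image back to
 * restore the previous levels.
 */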
static void snd_wss_calibrate_mute(struct snd_wss *chip, int mute)
{
unsigned long flags;
mute = mute ? 0x80 : 0;
spin_lock_irqsave(&chip->reg_lock, flags);
if (chip->calibrate_mute == mute) {
spin_unlock_irqrestore(&chip->reg_lock, flags);
return;
}
if (!mute) {
snd_wss_dout(chip, CS4231_LEFT_INPUT,
chip->image[CS4231_LEFT_INPUT]);
snd_wss_dout(chip, CS4231_RIGHT_INPUT,
chip->image[CS4231_RIGHT_INPUT]);
snd_wss_dout(chip, CS4231_LOOPBACK,
chip->image[CS4231_LOOPBACK]);
} else {
snd_wss_dout(chip, CS4231_LEFT_INPUT,
0);
snd_wss_dout(chip, CS4231_RIGHT_INPUT,
0);
snd_wss_dout(chip, CS4231_LOOPBACK,
0xfd);
}
snd_wss_dout(chip, CS4231_AUX1_LEFT_INPUT,
mute | chip->image[CS4231_AUX1_LEFT_INPUT]);
snd_wss_dout(chip, CS4231_AUX1_RIGHT_INPUT,
mute | chip->image[CS4231_AUX1_RIGHT_INPUT]);
snd_wss_dout(chip, CS4231_AUX2_LEFT_INPUT,
mute | chip->image[CS4231_AUX2_LEFT_INPUT]);
snd_wss_dout(chip, CS4231_AUX2_RIGHT_INPUT,
mute | chip->image[CS4231_AUX2_RIGHT_INPUT]);
snd_wss_dout(chip, CS4231_LEFT_OUTPUT,
mute | chip->image[CS4231_LEFT_OUTPUT]);
snd_wss_dout(chip, CS4231_RIGHT_OUTPUT,
mute | chip->image[CS4231_RIGHT_OUTPUT]);
if (!(chip->hardware & WSS_HW_AD1848_MASK)) {
snd_wss_dout(chip, CS4231_LEFT_LINE_IN,
mute | chip->image[CS4231_LEFT_LINE_IN]);
snd_wss_dout(chip, CS4231_RIGHT_LINE_IN,
mute | chip->image[CS4231_RIGHT_LINE_IN]);
snd_wss_dout(chip, CS4231_MONO_CTRL,
mute ? 0xc0 : chip->image[CS4231_MONO_CTRL]);
}
if (chip->hardware == WSS_HW_INTERWAVE) {
snd_wss_dout(chip, CS4231_LEFT_MIC_INPUT,
mute | chip->image[CS4231_LEFT_MIC_INPUT]);
snd_wss_dout(chip, CS4231_RIGHT_MIC_INPUT,
mute | chip->image[CS4231_RIGHT_MIC_INPUT]);
snd_wss_dout(chip, CS4231_LINE_LEFT_OUTPUT,
mute | chip->image[CS4231_LINE_LEFT_OUTPUT]);
snd_wss_dout(chip, CS4231_LINE_RIGHT_OUTPUT,
mute | chip->image[CS4231_LINE_RIGHT_OUTPUT]);
}
chip->calibrate_mute = mute;
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
static void snd_wss_playback_format(struct snd_wss *chip,
struct snd_pcm_hw_params *params,
unsigned char pdfr)
{
unsigned long flags;
int full_calib = 1;
mutex_lock(&chip->mce_mutex);
if (chip->hardware == WSS_HW_CS4231A ||
(chip->hardware & WSS_HW_CS4232_MASK)) {
spin_lock_irqsave(&chip->reg_lock, flags);
if ((chip->image[CS4231_PLAYBK_FORMAT] & 0x0f) == (pdfr & 0x0f)) { /* rate is same? */
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1] | 0x10);
chip->image[CS4231_PLAYBK_FORMAT] = pdfr;
snd_wss_out(chip, CS4231_PLAYBK_FORMAT,
chip->image[CS4231_PLAYBK_FORMAT]);
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1] &= ~0x10);
udelay(100); /* Fixes audible clicks at least on GUS MAX */
full_calib = 0;
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
} else if (chip->hardware == WSS_HW_AD1845) {
unsigned rate = params_rate(params);
/*
* Program the AD1845 correctly for the playback stream.
* Note that we do NOT need to toggle the MCE bit because
* the PLAYBACK_ENABLE bit of the Interface Configuration
* register is set.
*
* NOTE: We seem to need to write to the MSB before the LSB
* to get the correct sample frequency.
*/
spin_lock_irqsave(&chip->reg_lock, flags);
snd_wss_out(chip, CS4231_PLAYBK_FORMAT, (pdfr & 0xf0));
snd_wss_out(chip, AD1845_UPR_FREQ_SEL, (rate >> 8) & 0xff);
snd_wss_out(chip, AD1845_LWR_FREQ_SEL, rate & 0xff);
full_calib = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
if (full_calib) {
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
if (chip->hardware != WSS_HW_INTERWAVE && !chip->single_dma) {
if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE)
pdfr = (pdfr & 0xf0) |
(chip->image[CS4231_REC_FORMAT] & 0x0f);
} else {
chip->image[CS4231_PLAYBK_FORMAT] = pdfr;
}
snd_wss_out(chip, CS4231_PLAYBK_FORMAT, pdfr);
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (chip->hardware == WSS_HW_OPL3SA2)
udelay(100); /* this seems to help */
snd_wss_mce_down(chip);
}
mutex_unlock(&chip->mce_mutex);
}
static void snd_wss_capture_format(struct snd_wss *chip,
struct snd_pcm_hw_params *params,
unsigned char cdfr)
{
unsigned long flags;
int full_calib = 1;
mutex_lock(&chip->mce_mutex);
if (chip->hardware == WSS_HW_CS4231A ||
(chip->hardware & WSS_HW_CS4232_MASK)) {
spin_lock_irqsave(&chip->reg_lock, flags);
if ((chip->image[CS4231_PLAYBK_FORMAT] & 0x0f) == (cdfr & 0x0f) || /* rate is same? */
(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) {
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1] | 0x20);
snd_wss_out(chip, CS4231_REC_FORMAT,
chip->image[CS4231_REC_FORMAT] = cdfr);
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1] &= ~0x20);
full_calib = 0;
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
} else if (chip->hardware == WSS_HW_AD1845) {
unsigned rate = params_rate(params);
/*
* Program the AD1845 correctly for the capture stream.
* Note that we do NOT need to toggle the MCE bit because
* the PLAYBACK_ENABLE bit of the Interface Configuration
* register is set.
*
* NOTE: We seem to need to write to the MSB before the LSB
* to get the correct sample frequency.
*/
spin_lock_irqsave(&chip->reg_lock, flags);
snd_wss_out(chip, CS4231_REC_FORMAT, (cdfr & 0xf0));
snd_wss_out(chip, AD1845_UPR_FREQ_SEL, (rate >> 8) & 0xff);
snd_wss_out(chip, AD1845_LWR_FREQ_SEL, rate & 0xff);
full_calib = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
if (full_calib) {
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
if (chip->hardware != WSS_HW_INTERWAVE &&
!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) {
if (chip->single_dma)
snd_wss_out(chip, CS4231_PLAYBK_FORMAT, cdfr);
else
snd_wss_out(chip, CS4231_PLAYBK_FORMAT,
(chip->image[CS4231_PLAYBK_FORMAT] & 0xf0) |
(cdfr & 0x0f));
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
}
if (chip->hardware & WSS_HW_AD1848_MASK)
snd_wss_out(chip, CS4231_PLAYBK_FORMAT, cdfr);
else
snd_wss_out(chip, CS4231_REC_FORMAT, cdfr);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
}
mutex_unlock(&chip->mce_mutex);
}
/*
* Timer interface
*/
static unsigned long snd_wss_timer_resolution(struct snd_timer *timer)
{
struct snd_wss *chip = snd_timer_chip(timer);
if (chip->hardware & WSS_HW_CS4236B_MASK)
return 14467;
else
return chip->image[CS4231_PLAYBK_FORMAT] & 1 ? 9969 : 9920;
}
static int snd_wss_timer_start(struct snd_timer *timer)
{
unsigned long flags;
unsigned int ticks;
struct snd_wss *chip = snd_timer_chip(timer);
spin_lock_irqsave(&chip->reg_lock, flags);
ticks = timer->sticks;
if ((chip->image[CS4231_ALT_FEATURE_1] & CS4231_TIMER_ENABLE) == 0 ||
(unsigned char)(ticks >> 8) != chip->image[CS4231_TIMER_HIGH] ||
(unsigned char)ticks != chip->image[CS4231_TIMER_LOW]) {
chip->image[CS4231_TIMER_HIGH] = (unsigned char) (ticks >> 8);
snd_wss_out(chip, CS4231_TIMER_HIGH,
chip->image[CS4231_TIMER_HIGH]);
chip->image[CS4231_TIMER_LOW] = (unsigned char) ticks;
snd_wss_out(chip, CS4231_TIMER_LOW,
chip->image[CS4231_TIMER_LOW]);
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1] |
CS4231_TIMER_ENABLE);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
static int snd_wss_timer_stop(struct snd_timer *timer)
{
unsigned long flags;
struct snd_wss *chip = snd_timer_chip(timer);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->image[CS4231_ALT_FEATURE_1] &= ~CS4231_TIMER_ENABLE;
snd_wss_out(chip, CS4231_ALT_FEATURE_1,
chip->image[CS4231_ALT_FEATURE_1]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
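/*
 * Bring the codec to a known state: with the outputs muted, cycle MCE
 * several times to clear the playback/capture enables, restore the
 * cached format and feature registers, and let auto-calibration run.
 */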
static void snd_wss_init(struct snd_wss *chip)
{
unsigned long flags;
snd_wss_calibrate_mute(chip, 1);
snd_wss_mce_down(chip);
#ifdef SNDRV_DEBUG_MCE
snd_printk(KERN_DEBUG "init: (1)\n");
#endif
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE |
CS4231_PLAYBACK_PIO |
CS4231_RECORD_ENABLE |
CS4231_RECORD_PIO |
CS4231_CALIB_MODE);
chip->image[CS4231_IFACE_CTRL] |= CS4231_AUTOCALIB;
snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
#ifdef SNDRV_DEBUG_MCE
snd_printk(KERN_DEBUG "init: (2)\n");
#endif
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->image[CS4231_IFACE_CTRL] &= ~CS4231_AUTOCALIB;
snd_wss_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
snd_wss_out(chip,
CS4231_ALT_FEATURE_1, chip->image[CS4231_ALT_FEATURE_1]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
#ifdef SNDRV_DEBUG_MCE
snd_printk(KERN_DEBUG "init: (3) - afei = 0x%x\n",
chip->image[CS4231_ALT_FEATURE_1]);
#endif
spin_lock_irqsave(&chip->reg_lock, flags);
snd_wss_out(chip, CS4231_ALT_FEATURE_2,
chip->image[CS4231_ALT_FEATURE_2]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_wss_out(chip, CS4231_PLAYBK_FORMAT,
chip->image[CS4231_PLAYBK_FORMAT]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
#ifdef SNDRV_DEBUG_MCE
snd_printk(KERN_DEBUG "init: (4)\n");
#endif
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
if (!(chip->hardware & WSS_HW_AD1848_MASK))
snd_wss_out(chip, CS4231_REC_FORMAT,
chip->image[CS4231_REC_FORMAT]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
snd_wss_calibrate_mute(chip, 0);
#ifdef SNDRV_DEBUG_MCE
snd_printk(KERN_DEBUG "init: (5)\n");
#endif
}
static int snd_wss_open(struct snd_wss *chip, unsigned int mode)
{
unsigned long flags;
mutex_lock(&chip->open_mutex);
if ((chip->mode & mode) ||
((chip->mode & WSS_MODE_OPEN) && chip->single_dma)) {
mutex_unlock(&chip->open_mutex);
return -EAGAIN;
}
if (chip->mode & WSS_MODE_OPEN) {
chip->mode |= mode;
mutex_unlock(&chip->open_mutex);
return 0;
}
/* ok. now enable and ack CODEC IRQ */
spin_lock_irqsave(&chip->reg_lock, flags);
if (!(chip->hardware & WSS_HW_AD1848_MASK)) {
snd_wss_out(chip, CS4231_IRQ_STATUS,
CS4231_PLAYBACK_IRQ |
CS4231_RECORD_IRQ |
CS4231_TIMER_IRQ);
snd_wss_out(chip, CS4231_IRQ_STATUS, 0);
}
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
chip->image[CS4231_PIN_CTRL] |= CS4231_IRQ_ENABLE;
snd_wss_out(chip, CS4231_PIN_CTRL, chip->image[CS4231_PIN_CTRL]);
if (!(chip->hardware & WSS_HW_AD1848_MASK)) {
snd_wss_out(chip, CS4231_IRQ_STATUS,
CS4231_PLAYBACK_IRQ |
CS4231_RECORD_IRQ |
CS4231_TIMER_IRQ);
snd_wss_out(chip, CS4231_IRQ_STATUS, 0);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
chip->mode = mode;
mutex_unlock(&chip->open_mutex);
return 0;
}
static void snd_wss_close(struct snd_wss *chip, unsigned int mode)
{
unsigned long flags;
mutex_lock(&chip->open_mutex);
chip->mode &= ~mode;
if (chip->mode & WSS_MODE_OPEN) {
mutex_unlock(&chip->open_mutex);
return;
}
/* disable IRQ */
spin_lock_irqsave(&chip->reg_lock, flags);
if (!(chip->hardware & WSS_HW_AD1848_MASK))
snd_wss_out(chip, CS4231_IRQ_STATUS, 0);
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
chip->image[CS4231_PIN_CTRL] &= ~CS4231_IRQ_ENABLE;
snd_wss_out(chip, CS4231_PIN_CTRL, chip->image[CS4231_PIN_CTRL]);
/* now disable record & playback */
if (chip->image[CS4231_IFACE_CTRL] & (CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
CS4231_RECORD_ENABLE | CS4231_RECORD_PIO)) {
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
CS4231_RECORD_ENABLE | CS4231_RECORD_PIO);
snd_wss_out(chip, CS4231_IFACE_CTRL,
chip->image[CS4231_IFACE_CTRL]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_down(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
}
/* clear IRQ again */
if (!(chip->hardware & WSS_HW_AD1848_MASK))
snd_wss_out(chip, CS4231_IRQ_STATUS, 0);
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
wss_outb(chip, CS4231P(STATUS), 0); /* clear IRQ */
spin_unlock_irqrestore(&chip->reg_lock, flags);
chip->mode = 0;
mutex_unlock(&chip->open_mutex);
}
/*
* timer open/close
*/
static int snd_wss_timer_open(struct snd_timer *timer)
{
struct snd_wss *chip = snd_timer_chip(timer);
snd_wss_open(chip, WSS_MODE_TIMER);
return 0;
}
static int snd_wss_timer_close(struct snd_timer *timer)
{
struct snd_wss *chip = snd_timer_chip(timer);
snd_wss_close(chip, WSS_MODE_TIMER);
return 0;
}
static struct snd_timer_hardware snd_wss_timer_table =
{
.flags = SNDRV_TIMER_HW_AUTO,
.resolution = 9945,
.ticks = 65535,
.open = snd_wss_timer_open,
.close = snd_wss_timer_close,
.c_resolution = snd_wss_timer_resolution,
.start = snd_wss_timer_start,
.stop = snd_wss_timer_stop,
};
/*
* ok.. exported functions..
*/
static int snd_wss_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
unsigned char new_pdfr;
int err;
if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
return err;
new_pdfr = snd_wss_get_format(chip, params_format(hw_params),
params_channels(hw_params)) |
snd_wss_get_rate(params_rate(hw_params));
chip->set_playback_format(chip, hw_params, new_pdfr);
return 0;
}
static int snd_wss_playback_hw_free(struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
static int snd_wss_playback_prepare(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long flags;
unsigned int size = snd_pcm_lib_buffer_bytes(substream);
unsigned int count = snd_pcm_lib_period_bytes(substream);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->p_dma_size = size;
chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO);
snd_dma_program(chip->dma1, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT);
count = snd_wss_get_count(chip->image[CS4231_PLAYBK_FORMAT], count) - 1;
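/* The codec's count registers are programmed with (samples per period - 1). */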
snd_wss_out(chip, CS4231_PLY_LWR_CNT, (unsigned char) count);
snd_wss_out(chip, CS4231_PLY_UPR_CNT, (unsigned char) (count >> 8));
spin_unlock_irqrestore(&chip->reg_lock, flags);
#if 0
snd_wss_debug(chip);
#endif
return 0;
}
static int snd_wss_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
unsigned char new_cdfr;
int err;
if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
return err;
new_cdfr = snd_wss_get_format(chip, params_format(hw_params),
params_channels(hw_params)) |
snd_wss_get_rate(params_rate(hw_params));
chip->set_capture_format(chip, hw_params, new_cdfr);
return 0;
}
static int snd_wss_capture_hw_free(struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
static int snd_wss_capture_prepare(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long flags;
unsigned int size = snd_pcm_lib_buffer_bytes(substream);
unsigned int count = snd_pcm_lib_period_bytes(substream);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->c_dma_size = size;
chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE | CS4231_RECORD_PIO);
snd_dma_program(chip->dma2, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT);
if (chip->hardware & WSS_HW_AD1848_MASK)
count = snd_wss_get_count(chip->image[CS4231_PLAYBK_FORMAT],
count);
else
count = snd_wss_get_count(chip->image[CS4231_REC_FORMAT],
count);
count--;
if (chip->single_dma && chip->hardware != WSS_HW_INTERWAVE) {
snd_wss_out(chip, CS4231_PLY_LWR_CNT, (unsigned char) count);
snd_wss_out(chip, CS4231_PLY_UPR_CNT,
(unsigned char) (count >> 8));
} else {
snd_wss_out(chip, CS4231_REC_LWR_CNT, (unsigned char) count);
snd_wss_out(chip, CS4231_REC_UPR_CNT,
(unsigned char) (count >> 8));
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
void snd_wss_overrange(struct snd_wss *chip)
{
unsigned long flags;
unsigned char res;
spin_lock_irqsave(&chip->reg_lock, flags);
res = snd_wss_in(chip, CS4231_TEST_INIT);
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (res & (0x08 | 0x02)) /* detect overrange only above 0dB; may be user selectable? */
chip->capture_substream->runtime->overrange++;
}
EXPORT_SYMBOL(snd_wss_overrange);
irqreturn_t snd_wss_interrupt(int irq, void *dev_id)
{
struct snd_wss *chip = dev_id;
unsigned char status;
if (chip->hardware & WSS_HW_AD1848_MASK)
/* pretend it was the only possible irq for AD1848 */
status = CS4231_PLAYBACK_IRQ;
else
status = snd_wss_in(chip, CS4231_IRQ_STATUS);
if (status & CS4231_TIMER_IRQ) {
if (chip->timer)
snd_timer_interrupt(chip->timer, chip->timer->sticks);
}
if (chip->single_dma && chip->hardware != WSS_HW_INTERWAVE) {
if (status & CS4231_PLAYBACK_IRQ) {
if (chip->mode & WSS_MODE_PLAY) {
if (chip->playback_substream)
snd_pcm_period_elapsed(chip->playback_substream);
}
if (chip->mode & WSS_MODE_RECORD) {
if (chip->capture_substream) {
snd_wss_overrange(chip);
snd_pcm_period_elapsed(chip->capture_substream);
}
}
}
} else {
if (status & CS4231_PLAYBACK_IRQ) {
if (chip->playback_substream)
snd_pcm_period_elapsed(chip->playback_substream);
}
if (status & CS4231_RECORD_IRQ) {
if (chip->capture_substream) {
snd_wss_overrange(chip);
snd_pcm_period_elapsed(chip->capture_substream);
}
}
}
spin_lock(&chip->reg_lock);
status = ~CS4231_ALL_IRQS | ~status;
if (chip->hardware & WSS_HW_AD1848_MASK)
wss_outb(chip, CS4231P(STATUS), 0);
else
snd_wss_out(chip, CS4231_IRQ_STATUS, status);
spin_unlock(&chip->reg_lock);
return IRQ_HANDLED;
}
EXPORT_SYMBOL(snd_wss_interrupt);
static snd_pcm_uframes_t snd_wss_playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE))
return 0;
ptr = snd_dma_pointer(chip->dma1, chip->p_dma_size);
return bytes_to_frames(substream->runtime, ptr);
}
static snd_pcm_uframes_t snd_wss_capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
size_t ptr;
if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE))
return 0;
ptr = snd_dma_pointer(chip->dma2, chip->c_dma_size);
return bytes_to_frames(substream->runtime, ptr);
}
/*
*/
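/*
 * Detect AD1848-compatible codecs (AD1848, AD1847, CS4248, CMI8330) by
 * exercising the input registers and checking whether the extra MODE 2
 * register bank is present.
 */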
static int snd_ad1848_probe(struct snd_wss *chip)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned long flags;
unsigned char r;
unsigned short hardware = 0;
int err = 0;
int i;
while (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT) {
if (time_after(jiffies, timeout))
return -ENODEV;
cond_resched();
}
spin_lock_irqsave(&chip->reg_lock, flags);
/* set CS423x MODE 1 */
snd_wss_dout(chip, CS4231_MISC_INFO, 0);
snd_wss_dout(chip, CS4231_RIGHT_INPUT, 0x45); /* 0x55 & ~0x10 */
r = snd_wss_in(chip, CS4231_RIGHT_INPUT);
if (r != 0x45) {
/* RMGE always high on AD1847 */
if ((r & ~CS4231_ENABLE_MIC_GAIN) != 0x45) {
err = -ENODEV;
goto out;
}
hardware = WSS_HW_AD1847;
} else {
snd_wss_dout(chip, CS4231_LEFT_INPUT, 0xaa);
r = snd_wss_in(chip, CS4231_LEFT_INPUT);
/* L/RMGE always low on AT2320 */
if ((r | CS4231_ENABLE_MIC_GAIN) != 0xaa) {
err = -ENODEV;
goto out;
}
}
/* clear pending IRQ */
wss_inb(chip, CS4231P(STATUS));
wss_outb(chip, CS4231P(STATUS), 0);
mb();
if ((chip->hardware & WSS_HW_TYPE_MASK) != WSS_HW_DETECT)
goto out;
if (hardware) {
chip->hardware = hardware;
goto out;
}
r = snd_wss_in(chip, CS4231_MISC_INFO);
/* set CS423x MODE 2 */
snd_wss_dout(chip, CS4231_MISC_INFO, CS4231_MODE2);
for (i = 0; i < 16; i++) {
if (snd_wss_in(chip, i) != snd_wss_in(chip, 16 + i)) {
/* we have more than 16 registers: check ID */
if ((r & 0xf) != 0xa)
goto out_mode;
/*
* on CMI8330, CS4231_VERSION is volume control and
* can be set to 0
*/
snd_wss_dout(chip, CS4231_VERSION, 0);
r = snd_wss_in(chip, CS4231_VERSION) & 0xe7;
if (!r)
chip->hardware = WSS_HW_CMI8330;
goto out_mode;
}
}
if (r & 0x80)
chip->hardware = WSS_HW_CS4248;
else
chip->hardware = WSS_HW_AD1848;
out_mode:
snd_wss_dout(chip, CS4231_MISC_INFO, 0);
out:
spin_unlock_irqrestore(&chip->reg_lock, flags);
return err;
}
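/*
 * Full probe: identify the exact chip (CS4231/CS4232/CS4236+ families,
 * AD1845, ...) from the version and extended-version registers, then
 * download the default register image into the codec.
 */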
static int snd_wss_probe(struct snd_wss *chip)
{
unsigned long flags;
int i, id, rev, regnum;
unsigned char *ptr;
unsigned int hw;
id = snd_ad1848_probe(chip);
if (id < 0)
return id;
hw = chip->hardware;
if ((hw & WSS_HW_TYPE_MASK) == WSS_HW_DETECT) {
for (i = 0; i < 50; i++) {
mb();
if (wss_inb(chip, CS4231P(REGSEL)) & CS4231_INIT)
msleep(2);
else {
spin_lock_irqsave(&chip->reg_lock, flags);
snd_wss_out(chip, CS4231_MISC_INFO,
CS4231_MODE2);
id = snd_wss_in(chip, CS4231_MISC_INFO) & 0x0f;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (id == 0x0a)
break; /* this is valid value */
}
}
snd_printdd("wss: port = 0x%lx, id = 0x%x\n", chip->port, id);
if (id != 0x0a)
return -ENODEV; /* no valid device found */
rev = snd_wss_in(chip, CS4231_VERSION) & 0xe7;
snd_printdd("CS4231: VERSION (I25) = 0x%x\n", rev);
if (rev == 0x80) {
unsigned char tmp = snd_wss_in(chip, 23);
snd_wss_out(chip, 23, ~tmp);
if (snd_wss_in(chip, 23) != tmp)
chip->hardware = WSS_HW_AD1845;
else
chip->hardware = WSS_HW_CS4231;
} else if (rev == 0xa0) {
chip->hardware = WSS_HW_CS4231A;
} else if (rev == 0xa2) {
chip->hardware = WSS_HW_CS4232;
} else if (rev == 0xb2) {
chip->hardware = WSS_HW_CS4232A;
} else if (rev == 0x83) {
chip->hardware = WSS_HW_CS4236;
} else if (rev == 0x03) {
chip->hardware = WSS_HW_CS4236B;
} else {
snd_printk(KERN_ERR
"unknown CS chip with version 0x%x\n", rev);
return -ENODEV; /* unknown CS4231 chip? */
}
}
spin_lock_irqsave(&chip->reg_lock, flags);
wss_inb(chip, CS4231P(STATUS)); /* clear any pending IRQs */
wss_outb(chip, CS4231P(STATUS), 0);
mb();
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (!(chip->hardware & WSS_HW_AD1848_MASK))
chip->image[CS4231_MISC_INFO] = CS4231_MODE2;
switch (chip->hardware) {
case WSS_HW_INTERWAVE:
chip->image[CS4231_MISC_INFO] = CS4231_IW_MODE3;
break;
case WSS_HW_CS4235:
case WSS_HW_CS4236B:
case WSS_HW_CS4237B:
case WSS_HW_CS4238B:
case WSS_HW_CS4239:
if (hw == WSS_HW_DETECT3)
chip->image[CS4231_MISC_INFO] = CS4231_4236_MODE3;
else
chip->hardware = WSS_HW_CS4236;
break;
}
chip->image[CS4231_IFACE_CTRL] =
(chip->image[CS4231_IFACE_CTRL] & ~CS4231_SINGLE_DMA) |
(chip->single_dma ? CS4231_SINGLE_DMA : 0);
if (chip->hardware != WSS_HW_OPTI93X) {
chip->image[CS4231_ALT_FEATURE_1] = 0x80;
chip->image[CS4231_ALT_FEATURE_2] =
chip->hardware == WSS_HW_INTERWAVE ? 0xc2 : 0x01;
}
/* enable fine grained frequency selection */
if (chip->hardware == WSS_HW_AD1845)
chip->image[AD1845_PWR_DOWN] = 8;
ptr = (unsigned char *) &chip->image;
regnum = (chip->hardware & WSS_HW_AD1848_MASK) ? 16 : 32;
snd_wss_mce_down(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
for (i = 0; i < regnum; i++) /* ok.. fill all registers */
snd_wss_out(chip, i, *ptr++);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_wss_mce_up(chip);
snd_wss_mce_down(chip);
mdelay(2);
/* ok.. try check hardware version for CS4236+ chips */
if ((hw & WSS_HW_TYPE_MASK) == WSS_HW_DETECT) {
if (chip->hardware == WSS_HW_CS4236B) {
rev = snd_cs4236_ext_in(chip, CS4236_VERSION);
snd_cs4236_ext_out(chip, CS4236_VERSION, 0xff);
id = snd_cs4236_ext_in(chip, CS4236_VERSION);
snd_cs4236_ext_out(chip, CS4236_VERSION, rev);
snd_printdd("CS4231: ext version; rev = 0x%x, id = 0x%x\n", rev, id);
if ((id & 0x1f) == 0x1d) { /* CS4235 */
chip->hardware = WSS_HW_CS4235;
switch (id >> 5) {
case 4:
case 5:
case 6:
break;
default:
snd_printk(KERN_WARNING
"unknown CS4235 chip "
"(enhanced version = 0x%x)\n",
id);
}
} else if ((id & 0x1f) == 0x0b) { /* CS4236/B */
switch (id >> 5) {
case 4:
case 5:
case 6:
case 7:
chip->hardware = WSS_HW_CS4236B;
break;
default:
snd_printk(KERN_WARNING
"unknown CS4236 chip "
"(enhanced version = 0x%x)\n",
id);
}
} else if ((id & 0x1f) == 0x08) { /* CS4237B */
chip->hardware = WSS_HW_CS4237B;
switch (id >> 5) {
case 4:
case 5:
case 6:
case 7:
break;
default:
snd_printk(KERN_WARNING
"unknown CS4237B chip "
"(enhanced version = 0x%x)\n",
id);
}
} else if ((id & 0x1f) == 0x09) { /* CS4238B */
chip->hardware = WSS_HW_CS4238B;
switch (id >> 5) {
case 5:
case 6:
case 7:
break;
default:
snd_printk(KERN_WARNING
"unknown CS4238B chip "
"(enhanced version = 0x%x)\n",
id);
}
} else if ((id & 0x1f) == 0x1e) { /* CS4239 */
chip->hardware = WSS_HW_CS4239;
switch (id >> 5) {
case 4:
case 5:
case 6:
break;
default:
snd_printk(KERN_WARNING
"unknown CS4239 chip "
"(enhanced version = 0x%x)\n",
id);
}
} else {
snd_printk(KERN_WARNING
"unknown CS4236/CS423xB chip "
"(enhanced version = 0x%x)\n", id);
}
}
}
return 0; /* all things are ok.. */
}
/*
*/
static struct snd_pcm_hardware snd_wss_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_SYNC_START),
.formats = (SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE),
.rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5510,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
static struct snd_pcm_hardware snd_wss_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_SYNC_START),
.formats = (SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE),
.rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5510,
.rate_max = 48000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = (128*1024),
.period_bytes_min = 64,
.period_bytes_max = (128*1024),
.periods_min = 1,
.periods_max = 1024,
.fifo_size = 0,
};
/*
*/
static int snd_wss_playback_open(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
runtime->hw = snd_wss_playback;
/* hardware limitation of older chipsets */
if (chip->hardware & WSS_HW_AD1848_MASK)
runtime->hw.formats &= ~(SNDRV_PCM_FMTBIT_IMA_ADPCM |
SNDRV_PCM_FMTBIT_S16_BE);
/* hardware bug in InterWave chipset */
if (chip->hardware == WSS_HW_INTERWAVE && chip->dma1 > 3)
runtime->hw.formats &= ~SNDRV_PCM_FMTBIT_MU_LAW;
/* hardware limitation of cheap chips */
if (chip->hardware == WSS_HW_CS4235 ||
chip->hardware == WSS_HW_CS4239)
runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE;
snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.buffer_bytes_max);
snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.period_bytes_max);
if (chip->claim_dma) {
if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma1)) < 0)
return err;
}
err = snd_wss_open(chip, WSS_MODE_PLAY);
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma1);
snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->playback_substream = substream;
snd_pcm_set_sync(substream);
chip->rate_constraint(runtime);
return 0;
}
static int snd_wss_capture_open(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
runtime->hw = snd_wss_capture;
/* hardware limitation of older chipsets */
if (chip->hardware & WSS_HW_AD1848_MASK)
runtime->hw.formats &= ~(SNDRV_PCM_FMTBIT_IMA_ADPCM |
SNDRV_PCM_FMTBIT_S16_BE);
/* hardware limitation of cheap chips */
if (chip->hardware == WSS_HW_CS4235 ||
chip->hardware == WSS_HW_CS4239 ||
chip->hardware == WSS_HW_OPTI93X)
runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE;
snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.buffer_bytes_max);
snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.period_bytes_max);
if (chip->claim_dma) {
if ((err = chip->claim_dma(chip, chip->dma_private_data, chip->dma2)) < 0)
return err;
}
err = snd_wss_open(chip, WSS_MODE_RECORD);
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma2);
snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->capture_substream = substream;
snd_pcm_set_sync(substream);
chip->rate_constraint(runtime);
return 0;
}
static int snd_wss_playback_close(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
chip->playback_substream = NULL;
snd_wss_close(chip, WSS_MODE_PLAY);
return 0;
}
static int snd_wss_capture_close(struct snd_pcm_substream *substream)
{
struct snd_wss *chip = snd_pcm_substream_chip(substream);
chip->capture_substream = NULL;
snd_wss_close(chip, WSS_MODE_RECORD);
return 0;
}
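/* Enable or disable the CS4248 codec via the ThinkPad control ports. */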
static void snd_wss_thinkpad_twiddle(struct snd_wss *chip, int on)
{
int tmp;
if (!chip->thinkpad_flag)
return;
outb(0x1c, AD1848_THINKPAD_CTL_PORT1);
tmp = inb(AD1848_THINKPAD_CTL_PORT2);
if (on)
/* turn it on */
tmp |= AD1848_THINKPAD_CS4248_ENABLE_BIT;
else
/* turn it off */
tmp &= ~AD1848_THINKPAD_CS4248_ENABLE_BIT;
outb(tmp, AD1848_THINKPAD_CTL_PORT2);
}
#ifdef CONFIG_PM
/* lowlevel suspend callback for CS4231 */
static void snd_wss_suspend(struct snd_wss *chip)
{
int reg;
unsigned long flags;
snd_pcm_suspend_all(chip->pcm);
spin_lock_irqsave(&chip->reg_lock, flags);
for (reg = 0; reg < 32; reg++)
chip->image[reg] = snd_wss_in(chip, reg);
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (chip->thinkpad_flag)
snd_wss_thinkpad_twiddle(chip, 0);
}
/* lowlevel resume callback for CS4231 */
static void snd_wss_resume(struct snd_wss *chip)
{
int reg;
unsigned long flags;
/* int timeout; */
if (chip->thinkpad_flag)
snd_wss_thinkpad_twiddle(chip, 1);
snd_wss_mce_up(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
for (reg = 0; reg < 32; reg++) {
switch (reg) {
case CS4231_VERSION:
break;
default:
snd_wss_out(chip, reg, chip->image[reg]);
break;
}
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
#if 1
snd_wss_mce_down(chip);
#else
/* The following is a workaround to avoid freeze after resume on TP600E.
This duplicates the first half of snd_wss_mce_down(), but doesn't
include rescheduling. -- iwai
*/
snd_wss_busy_wait(chip);
spin_lock_irqsave(&chip->reg_lock, flags);
chip->mce_bit &= ~CS4231_MCE;
timeout = wss_inb(chip, CS4231P(REGSEL));
wss_outb(chip, CS4231P(REGSEL), chip->mce_bit | (timeout & 0x1f));
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (timeout == 0x80)
snd_printk(KERN_ERR "down [0x%lx]: serious init problem "
"- codec still busy\n", chip->port);
if ((timeout & CS4231_MCE) == 0 ||
!(chip->hardware & (WSS_HW_CS4231_MASK | WSS_HW_CS4232_MASK))) {
return;
}
snd_wss_busy_wait(chip);
#endif
}
#endif /* CONFIG_PM */
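/*
 * Release the resources claimed in snd_wss_create(): I/O regions, the IRQ
 * and DMA channels (unless marked as shared) and the timer device, then
 * free the chip structure.
 */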
static int snd_wss_free(struct snd_wss *chip)
{
release_and_free_resource(chip->res_port);
release_and_free_resource(chip->res_cport);
if (chip->irq >= 0) {
disable_irq(chip->irq);
if (!(chip->hwshare & WSS_HWSHARE_IRQ))
free_irq(chip->irq, (void *) chip);
}
if (!(chip->hwshare & WSS_HWSHARE_DMA1) && chip->dma1 >= 0) {
snd_dma_disable(chip->dma1);
free_dma(chip->dma1);
}
if (!(chip->hwshare & WSS_HWSHARE_DMA2) &&
chip->dma2 >= 0 && chip->dma2 != chip->dma1) {
snd_dma_disable(chip->dma2);
free_dma(chip->dma2);
}
if (chip->timer)
snd_device_free(chip->card, chip->timer);
kfree(chip);
return 0;
}
static int snd_wss_dev_free(struct snd_device *device)
{
struct snd_wss *chip = device->device_data;
return snd_wss_free(chip);
}
const char *snd_wss_chip_id(struct snd_wss *chip)
{
switch (chip->hardware) {
case WSS_HW_CS4231:
return "CS4231";
case WSS_HW_CS4231A:
return "CS4231A";
case WSS_HW_CS4232:
return "CS4232";
case WSS_HW_CS4232A:
return "CS4232A";
case WSS_HW_CS4235:
return "CS4235";
case WSS_HW_CS4236:
return "CS4236";
case WSS_HW_CS4236B:
return "CS4236B";
case WSS_HW_CS4237B:
return "CS4237B";
case WSS_HW_CS4238B:
return "CS4238B";
case WSS_HW_CS4239:
return "CS4239";
case WSS_HW_INTERWAVE:
return "AMD InterWave";
case WSS_HW_OPL3SA2:
return chip->card->shortname;
case WSS_HW_AD1845:
return "AD1845";
case WSS_HW_OPTI93X:
return "OPTi 93x";
case WSS_HW_AD1847:
return "AD1847";
case WSS_HW_AD1848:
return "AD1848";
case WSS_HW_CS4248:
return "CS4248";
case WSS_HW_CMI8330:
return "CMI8330/C3D";
default:
return "???";
}
}
EXPORT_SYMBOL(snd_wss_chip_id);
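/*
 * Allocate a struct snd_wss and preset locks, format callbacks and the
 * default register image (OPTi93x chips use their own image).
 */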
static int snd_wss_new(struct snd_card *card,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip)
{
struct snd_wss *chip;
*rchip = NULL;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->hardware = hardware;
chip->hwshare = hwshare;
spin_lock_init(&chip->reg_lock);
mutex_init(&chip->mce_mutex);
mutex_init(&chip->open_mutex);
chip->card = card;
chip->rate_constraint = snd_wss_xrate;
chip->set_playback_format = snd_wss_playback_format;
chip->set_capture_format = snd_wss_capture_format;
if (chip->hardware == WSS_HW_OPTI93X)
memcpy(&chip->image, &snd_opti93x_original_image,
sizeof(snd_opti93x_original_image));
else
memcpy(&chip->image, &snd_wss_original_image,
sizeof(snd_wss_original_image));
if (chip->hardware & WSS_HW_AD1848_MASK) {
chip->image[CS4231_PIN_CTRL] = 0;
chip->image[CS4231_TEST_INIT] = 0;
}
*rchip = chip;
return 0;
}
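/*
 * Create and probe a WSS codec: reserve the I/O ports, optional control
 * port, IRQ and DMA channel(s), detect the chip, initialize it and
 * register it as a low-level ALSA device.
 */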
int snd_wss_create(struct snd_card *card,
unsigned long port,
unsigned long cport,
int irq, int dma1, int dma2,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip)
{
static struct snd_device_ops ops = {
.dev_free = snd_wss_dev_free,
};
struct snd_wss *chip;
int err;
err = snd_wss_new(card, hardware, hwshare, &chip);
if (err < 0)
return err;
chip->irq = -1;
chip->dma1 = -1;
chip->dma2 = -1;
chip->res_port = request_region(port, 4, "WSS");
if (!chip->res_port) {
snd_printk(KERN_ERR "wss: can't grab port 0x%lx\n", port);
snd_wss_free(chip);
return -EBUSY;
}
chip->port = port;
if ((long)cport >= 0) {
chip->res_cport = request_region(cport, 8, "CS4232 Control");
if (!chip->res_cport) {
snd_printk(KERN_ERR
"wss: can't grab control port 0x%lx\n", cport);
snd_wss_free(chip);
return -ENODEV;
}
}
chip->cport = cport;
if (!(hwshare & WSS_HWSHARE_IRQ))
if (request_irq(irq, snd_wss_interrupt, 0,
"WSS", (void *) chip)) {
snd_printk(KERN_ERR "wss: can't grab IRQ %d\n", irq);
snd_wss_free(chip);
return -EBUSY;
}
chip->irq = irq;
if (!(hwshare & WSS_HWSHARE_DMA1) && request_dma(dma1, "WSS - 1")) {
snd_printk(KERN_ERR "wss: can't grab DMA1 %d\n", dma1);
snd_wss_free(chip);
return -EBUSY;
}
chip->dma1 = dma1;
if (!(hwshare & WSS_HWSHARE_DMA2) && dma1 != dma2 &&
dma2 >= 0 && request_dma(dma2, "WSS - 2")) {
snd_printk(KERN_ERR "wss: can't grab DMA2 %d\n", dma2);
snd_wss_free(chip);
return -EBUSY;
}
if (dma1 == dma2 || dma2 < 0) {
chip->single_dma = 1;
chip->dma2 = chip->dma1;
} else
chip->dma2 = dma2;
if (hardware == WSS_HW_THINKPAD) {
chip->thinkpad_flag = 1;
chip->hardware = WSS_HW_DETECT; /* reset */
snd_wss_thinkpad_twiddle(chip, 1);
}
/* global setup */
if (snd_wss_probe(chip) < 0) {
snd_wss_free(chip);
return -ENODEV;
}
snd_wss_init(chip);
#if 0
if (chip->hardware & WSS_HW_CS4232_MASK) {
if (chip->res_cport == NULL)
snd_printk(KERN_ERR "CS4232 control port features are "
"not accessible\n");
}
#endif
/* Register device */
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0) {
snd_wss_free(chip);
return err;
}
#ifdef CONFIG_PM
/* Power Management */
chip->suspend = snd_wss_suspend;
chip->resume = snd_wss_resume;
#endif
*rchip = chip;
return 0;
}
EXPORT_SYMBOL(snd_wss_create);
static struct snd_pcm_ops snd_wss_playback_ops = {
.open = snd_wss_playback_open,
.close = snd_wss_playback_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_wss_playback_hw_params,
.hw_free = snd_wss_playback_hw_free,
.prepare = snd_wss_playback_prepare,
.trigger = snd_wss_trigger,
.pointer = snd_wss_playback_pointer,
};
static struct snd_pcm_ops snd_wss_capture_ops = {
.open = snd_wss_capture_open,
.close = snd_wss_capture_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_wss_capture_hw_params,
.hw_free = snd_wss_capture_hw_free,
.prepare = snd_wss_capture_prepare,
.trigger = snd_wss_trigger,
.pointer = snd_wss_capture_pointer,
};
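/*
 * Build the PCM device: attach the playback/capture ops and preallocate
 * ISA DMA buffers (up to 128 KB when a 16-bit DMA channel is used).
 */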
int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(chip->card, "WSS", device, 1, 1, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_wss_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_wss_capture_ops);
/* global setup */
pcm->private_data = chip;
pcm->info_flags = 0;
if (chip->single_dma)
pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX;
if (chip->hardware != WSS_HW_INTERWAVE)
pcm->info_flags |= SNDRV_PCM_INFO_JOINT_DUPLEX;
strcpy(pcm->name, snd_wss_chip_id(chip));
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_isa_data(),
64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
chip->pcm = pcm;
if (rpcm)
*rpcm = pcm;
return 0;
}
EXPORT_SYMBOL(snd_wss_pcm);
static void snd_wss_timer_free(struct snd_timer *timer)
{
struct snd_wss *chip = timer->private_data;
chip->timer = NULL;
}
int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer)
{
struct snd_timer *timer;
struct snd_timer_id tid;
int err;
/* Timer initialization */
tid.dev_class = SNDRV_TIMER_CLASS_CARD;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = chip->card->number;
tid.device = device;
tid.subdevice = 0;
if ((err = snd_timer_new(chip->card, "CS4231", &tid, &timer)) < 0)
return err;
strcpy(timer->name, snd_wss_chip_id(chip));
timer->private_data = chip;
timer->private_free = snd_wss_timer_free;
timer->hw = snd_wss_timer_table;
chip->timer = timer;
if (rtimer)
*rtimer = timer;
return 0;
}
EXPORT_SYMBOL(snd_wss_timer);
/*
* MIXER part
*/
static int snd_wss_info_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static char *texts[4] = {
"Line", "Aux", "Mic", "Mix"
};
static char *opl3sa_texts[4] = {
"Line", "CD", "Mic", "Mix"
};
static char *gusmax_texts[4] = {
"Line", "Synth", "Mic", "Mix"
};
char **ptexts = texts;
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
if (snd_BUG_ON(!chip->card))
return -EINVAL;
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 2;
uinfo->value.enumerated.items = 4;
if (uinfo->value.enumerated.item > 3)
uinfo->value.enumerated.item = 3;
if (!strcmp(chip->card->driver, "GUS MAX"))
ptexts = gusmax_texts;
switch (chip->hardware) {
case WSS_HW_INTERWAVE:
ptexts = gusmax_texts;
break;
case WSS_HW_OPTI93X:
case WSS_HW_OPL3SA2:
ptexts = opl3sa_texts;
break;
}
strcpy(uinfo->value.enumerated.name, ptexts[uinfo->value.enumerated.item]);
return 0;
}
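/* The active capture source lives in the top two bits of each input register. */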
static int snd_wss_get_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
ucontrol->value.enumerated.item[0] = (chip->image[CS4231_LEFT_INPUT] & CS4231_MIXS_ALL) >> 6;
ucontrol->value.enumerated.item[1] = (chip->image[CS4231_RIGHT_INPUT] & CS4231_MIXS_ALL) >> 6;
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
static int snd_wss_put_mux(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
unsigned short left, right;
int change;
if (ucontrol->value.enumerated.item[0] > 3 ||
ucontrol->value.enumerated.item[1] > 3)
return -EINVAL;
left = ucontrol->value.enumerated.item[0] << 6;
right = ucontrol->value.enumerated.item[1] << 6;
spin_lock_irqsave(&chip->reg_lock, flags);
left = (chip->image[CS4231_LEFT_INPUT] & ~CS4231_MIXS_ALL) | left;
right = (chip->image[CS4231_RIGHT_INPUT] & ~CS4231_MIXS_ALL) | right;
change = left != chip->image[CS4231_LEFT_INPUT] ||
right != chip->image[CS4231_RIGHT_INPUT];
snd_wss_out(chip, CS4231_LEFT_INPUT, left);
snd_wss_out(chip, CS4231_RIGHT_INPUT, right);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
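/*
 * Single mixer controls encode their layout in private_value:
 * bits 0-7 register, 8-15 bit shift, 16-23 maximum value (mask),
 * 24-31 invert flag.
 */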
int snd_wss_info_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
int mask = (kcontrol->private_value >> 16) & 0xff;
uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = mask;
return 0;
}
EXPORT_SYMBOL(snd_wss_info_single);
int snd_wss_get_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
spin_lock_irqsave(&chip->reg_lock, flags);
ucontrol->value.integer.value[0] = (chip->image[reg] >> shift) & mask;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (invert)
ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
return 0;
}
EXPORT_SYMBOL(snd_wss_get_single);
int snd_wss_put_single(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
int change;
unsigned short val;
val = (ucontrol->value.integer.value[0] & mask);
if (invert)
val = mask - val;
val <<= shift;
spin_lock_irqsave(&chip->reg_lock, flags);
val = (chip->image[reg] & ~(mask << shift)) | val;
change = val != chip->image[reg];
snd_wss_out(chip, reg, val);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
EXPORT_SYMBOL(snd_wss_put_single);
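/*
 * Stereo mixer controls encode their layout in private_value:
 * bits 0-7 left register, 8-15 right register, 16-18 left shift,
 * 19-21 right shift, bit 22 invert flag, bits 24-31 maximum value (mask).
 */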
int snd_wss_info_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
int mask = (kcontrol->private_value >> 24) & 0xff;
uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = mask;
return 0;
}
EXPORT_SYMBOL(snd_wss_info_double);
int snd_wss_get_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int left_reg = kcontrol->private_value & 0xff;
int right_reg = (kcontrol->private_value >> 8) & 0xff;
int shift_left = (kcontrol->private_value >> 16) & 0x07;
int shift_right = (kcontrol->private_value >> 19) & 0x07;
int mask = (kcontrol->private_value >> 24) & 0xff;
int invert = (kcontrol->private_value >> 22) & 1;
spin_lock_irqsave(&chip->reg_lock, flags);
ucontrol->value.integer.value[0] = (chip->image[left_reg] >> shift_left) & mask;
ucontrol->value.integer.value[1] = (chip->image[right_reg] >> shift_right) & mask;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (invert) {
ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1];
}
return 0;
}
EXPORT_SYMBOL(snd_wss_get_double);
int snd_wss_put_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_wss *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int left_reg = kcontrol->private_value & 0xff;
int right_reg = (kcontrol->private_value >> 8) & 0xff;
int shift_left = (kcontrol->private_value >> 16) & 0x07;
int shift_right = (kcontrol->private_value >> 19) & 0x07;
int mask = (kcontrol->private_value >> 24) & 0xff;
int invert = (kcontrol->private_value >> 22) & 1;
int change;
unsigned short val1, val2;
val1 = ucontrol->value.integer.value[0] & mask;
val2 = ucontrol->value.integer.value[1] & mask;
if (invert) {
val1 = mask - val1;
val2 = mask - val2;
}
val1 <<= shift_left;
val2 <<= shift_right;
spin_lock_irqsave(&chip->reg_lock, flags);
if (left_reg != right_reg) {
val1 = (chip->image[left_reg] & ~(mask << shift_left)) | val1;
val2 = (chip->image[right_reg] & ~(mask << shift_right)) | val2;
change = val1 != chip->image[left_reg] ||
val2 != chip->image[right_reg];
snd_wss_out(chip, left_reg, val1);
snd_wss_out(chip, right_reg, val2);
} else {
mask = (mask << shift_left) | (mask << shift_right);
val1 = (chip->image[left_reg] & ~mask) | val1 | val2;
change = val1 != chip->image[left_reg];
snd_wss_out(chip, left_reg, val1);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
EXPORT_SYMBOL(snd_wss_put_double);
static const DECLARE_TLV_DB_SCALE(db_scale_6bit, -9450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_rec_gain, 0, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_4bit, -4500, 300, 0);
static struct snd_kcontrol_new snd_wss_controls[] = {
WSS_DOUBLE("PCM Playback Switch", 0,
CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 7, 7, 1, 1),
WSS_DOUBLE_TLV("PCM Playback Volume", 0,
CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 0, 0, 63, 1,
db_scale_6bit),
WSS_DOUBLE("Aux Playback Switch", 0,
CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 7, 7, 1, 1),
WSS_DOUBLE_TLV("Aux Playback Volume", 0,
CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 0, 0, 31, 1,
db_scale_5bit_12db_max),
WSS_DOUBLE("Aux Playback Switch", 1,
CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 7, 7, 1, 1),
WSS_DOUBLE_TLV("Aux Playback Volume", 1,
CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 0, 0, 31, 1,
db_scale_5bit_12db_max),
WSS_DOUBLE_TLV("Capture Volume", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT,
0, 0, 15, 0, db_scale_rec_gain),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.info = snd_wss_info_mux,
.get = snd_wss_get_mux,
.put = snd_wss_put_mux,
},
WSS_DOUBLE("Mic Boost (+20dB)", 0,
CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 5, 5, 1, 0),
WSS_SINGLE("Loopback Capture Switch", 0,
CS4231_LOOPBACK, 0, 1, 0),
WSS_SINGLE_TLV("Loopback Capture Volume", 0, CS4231_LOOPBACK, 2, 63, 1,
db_scale_6bit),
WSS_DOUBLE("Line Playback Switch", 0,
CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 7, 7, 1, 1),
WSS_DOUBLE_TLV("Line Playback Volume", 0,
CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 0, 0, 31, 1,
db_scale_5bit_12db_max),
WSS_SINGLE("Beep Playback Switch", 0,
CS4231_MONO_CTRL, 7, 1, 1),
WSS_SINGLE_TLV("Beep Playback Volume", 0,
CS4231_MONO_CTRL, 0, 15, 1,
db_scale_4bit),
WSS_SINGLE("Mono Output Playback Switch", 0,
CS4231_MONO_CTRL, 6, 1, 1),
WSS_SINGLE("Beep Bypass Playback Switch", 0,
CS4231_MONO_CTRL, 5, 1, 0),
};
int snd_wss_mixer(struct snd_wss *chip)
{
struct snd_card *card;
unsigned int idx;
int err;
int count = ARRAY_SIZE(snd_wss_controls);
if (snd_BUG_ON(!chip || !chip->pcm))
return -EINVAL;
card = chip->card;
strcpy(card->mixername, chip->pcm->name);
/* Use only the first 11 entries on AD1848 */
if (chip->hardware & WSS_HW_AD1848_MASK)
count = 11;
/* There is no loopback on OPTI93X */
else if (chip->hardware == WSS_HW_OPTI93X)
count = 9;
for (idx = 0; idx < count; idx++) {
err = snd_ctl_add(card,
snd_ctl_new1(&snd_wss_controls[idx],
chip));
if (err < 0)
return err;
}
return 0;
}
EXPORT_SYMBOL(snd_wss_mixer);
const struct snd_pcm_ops *snd_wss_get_pcm_ops(int direction)
{
return direction == SNDRV_PCM_STREAM_PLAYBACK ?
&snd_wss_playback_ops : &snd_wss_capture_ops;
}
EXPORT_SYMBOL(snd_wss_get_pcm_ops);
/*
* INIT part
*/
static int __init alsa_wss_init(void)
{
return 0;
}
static void __exit alsa_wss_exit(void)
{
}
module_init(alsa_wss_init);
module_exit(alsa_wss_exit);
| gpl-2.0 |
CarbonROM/android_kernel_xiaomi_armani | fs/jfs/jfs_dtree.c | 7673 | 102632 | /*
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* jfs_dtree.c: directory B+-tree manager
*
* B+-tree with variable length key directory:
*
* each directory page is structured as an array of 32-byte
* directory entry slots initialized as a freelist
* to avoid search/compaction of free space at insertion.
* when an entry is inserted, a number of slots are allocated
* from the freelist as required to store variable length data
* of the entry; when the entry is deleted, slots of the entry
* are returned to freelist.
*
* leaf entry stores full name as key and file serial number
* (aka inode number) as data.
* internal/router entry stores suffix compressed name
* as key and simple extent descriptor as data.
*
* each directory page maintains a sorted entry index table
* which stores the start slot index of sorted entries
* to allow binary search on the table.
*
* directory starts as a root/leaf page in on-disk inode
* inline data area.
* when it becomes full, it is converted into a leaf of an external extent
* of length 1 block. each time the first leaf becomes full,
* it is extended rather than split (its size is doubled),
* until its length becomes 4 KBytes; from then on the extent is split
* with a new 4 KByte extent when it becomes full
* to reduce external fragmentation of small directories.
*
* blah, blah, blah, for linear scan of directory in pieces by
* readdir().
*
*
* case-insensitive directory file system
*
* names are stored in a case-sensitive way in the leaf entry.
* but stored, searched and compared in case-insensitive (uppercase) order
* (i.e., both search key and entry key are folded for search/compare):
* (note that case-sensitive order is BROKEN in storage, e.g.,
* sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad
*
* entries which fold to the same key make up an equivalent class
* whose members are stored as a contiguous cluster (may cross page boundary)
* but whose order is arbitrary and they act as duplicates, e.g.,
* abc, Abc, aBc, abC)
*
* once a match is found at a leaf, a forward/backward scan is required,
* either, in a case-insensitive search, for a duplicate,
* or, in a case-sensitive search, for an exact match
*
* router entry must be created/stored in a case-insensitive way
* in the internal entry:
* (the rightmost key of the left page and the leftmost key of the right page
* are folded, and their suffix compression is propagated as the router
* key in the parent)
* (e.g., if a split occurs between <abc> and <aBd>, <ABD> rather than <aB>
* should be made the router key for the split)
*
* case-insensitive search:
*
* fold search key;
*
* case-insensitive search of B-tree:
* for internal entry, router key is already folded;
* for leaf entry, fold the entry key before comparison.
*
* if (leaf entry case-insensitive match found)
* if (next entry satisfies case-insensitive match)
* return EDUPLICATE;
* if (prev entry satisfies case-insensitive match)
* return EDUPLICATE;
* return match;
* else
* return no match;
*
* serialization:
* target directory inode lock is held on entry/exit
* of all main directory service routines.
*
* log based recovery:
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dmap.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
/* dtree split parameter */
struct dtsplit {
struct metapage *mp;
s16 index;
s16 nslot;
struct component_name *key;
ddata_t *data;
struct pxdlist *pxdlist;
};
#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)
/* get page buffer for specified block address */
#define DT_GETPAGE(IP, BN, MP, SIZE, P, RC)\
{\
BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot)\
if (!(RC))\
{\
if (((P)->header.nextindex > (((BN)==0)?DTROOTMAXSLOT:(P)->header.maxslot)) ||\
((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT)))\
{\
BT_PUTPAGE(MP);\
jfs_error((IP)->i_sb, "DT_GETPAGE: dtree page corrupt");\
MP = NULL;\
RC = -EIO;\
}\
}\
}
/* for consistency */
#define DT_PUTPAGE(MP) BT_PUTPAGE(MP)
#define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)
/*
* forward references
*/
static int dtSplitUp(tid_t tid, struct inode *ip,
struct dtsplit * split, struct btstack * btstack);
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
static int dtExtendPage(tid_t tid, struct inode *ip,
struct dtsplit * split, struct btstack * btstack);
static int dtSplitRoot(tid_t tid, struct inode *ip,
struct dtsplit * split, struct metapage ** rmpp);
static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
dtpage_t * fp, struct btstack * btstack);
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
static int dtReadFirst(struct inode *ip, struct btstack * btstack);
static int dtReadNext(struct inode *ip,
loff_t * offset, struct btstack * btstack);
static int dtCompare(struct component_name * key, dtpage_t * p, int si);
static int ciCompare(struct component_name * key, dtpage_t * p, int si,
int flag);
static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
int flag);
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag);
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock **);
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index);
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
#define ciToUpper(c) UniStrupr((c)->name)
/*
* read_index_page()
*
* Reads a page of a directory's index table.
* Having metadata mapped into the directory inode's address space
* presents a multitude of problems. We avoid this by mapping to
* the absolute address space outside of the *_metapage routines
*/
static struct metapage *read_index_page(struct inode *inode, s64 blkno)
{
int rc;
s64 xaddr;
int xflag;
s32 xlen;
rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
if (rc || (xaddr == 0))
return NULL;
return read_metapage(inode, xaddr, PSIZE, 1);
}
/*
* get_index_page()
*
* Same as read_index_page(), but gets a new page without reading
*/
static struct metapage *get_index_page(struct inode *inode, s64 blkno)
{
int rc;
s64 xaddr;
int xflag;
s32 xlen;
rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
if (rc || (xaddr == 0))
return NULL;
return get_metapage(inode, xaddr, PSIZE, 1);
}
/*
* find_index()
*
* Returns dtree page containing directory table entry for specified
* index and pointer to its entry.
*
* mp must be released by caller.
*/
static struct dir_table_slot *find_index(struct inode *ip, u32 index,
struct metapage ** mp, s64 *lblock)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
s64 blkno;
s64 offset;
int page_offset;
struct dir_table_slot *slot;
static int maxWarnings = 10;
if (index < 2) {
if (maxWarnings) {
jfs_warn("find_entry called with index = %d", index);
maxWarnings--;
}
return NULL;
}
if (index >= jfs_ip->next_index) {
jfs_warn("find_entry called with index >= next_index");
return NULL;
}
if (jfs_dirtable_inline(ip)) {
/*
* Inline directory table
*/
*mp = NULL;
slot = &jfs_ip->i_dirtable[index - 2];
} else {
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) <<
JFS_SBI(ip->i_sb)->l2nbperpage;
if (*mp && (*lblock != blkno)) {
release_metapage(*mp);
*mp = NULL;
}
if (!(*mp)) {
*lblock = blkno;
*mp = read_index_page(ip, blkno);
}
if (!(*mp)) {
jfs_err("free_index: error reading directory table");
return NULL;
}
slot =
(struct dir_table_slot *) ((char *) (*mp)->data +
page_offset);
}
return slot;
}
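/*
 * lock_index()
 *
 * Transaction-lock the directory table slot for 'index' within its
 * metapage so the slot update is covered by the running transaction.
 */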
static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
u32 index)
{
struct tlock *tlck;
struct linelock *llck;
struct lv *lv;
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (struct linelock *) tlck->lock;
if (llck->index >= llck->maxcnt)
llck = txLinelock(llck);
lv = &llck->lv[llck->index];
/*
* Linelock slot size is twice the size of directory table
* slot size. 512 entries per page.
*/
lv->offset = ((index - 2) & 511) >> 1;
lv->length = 1;
llck->index++;
}
/*
* add_index()
*
* Adds an entry to the directory index table. This is used to provide
* each directory entry with a persistent index in which to resume
* directory traversals
*/
static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
{
struct super_block *sb = ip->i_sb;
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
u64 blkno;
struct dir_table_slot *dirtab_slot;
u32 index;
struct linelock *llck;
struct lv *lv;
struct metapage *mp;
s64 offset;
uint page_offset;
struct tlock *tlck;
s64 xaddr;
ASSERT(DO_INDEX(ip));
if (jfs_ip->next_index < 2) {
jfs_warn("add_index: next_index = %d. Resetting!",
jfs_ip->next_index);
jfs_ip->next_index = 2;
}
index = jfs_ip->next_index++;
if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
/*
* i_size reflects size of index table, or 8 bytes per entry.
*/
ip->i_size = (loff_t) (index - 1) << 3;
/*
* dir table fits inline within inode
*/
dirtab_slot = &jfs_ip->i_dirtable[index-2];
dirtab_slot->flag = DIR_INDEX_VALID;
dirtab_slot->slot = slot;
DTSaddress(dirtab_slot, bn);
set_cflag(COMMIT_Dirtable, ip);
return index;
}
if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
struct dir_table_slot temp_table[12];
/*
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (dquot_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
/*
* Save the table, we're going to overwrite it with the
* xtree root
*/
memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
/*
* Initialize empty x-tree
*/
xtInitRoot(tid, ip);
/*
* Add the first block to the xtree
*/
if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
/* This really shouldn't fail */
jfs_warn("add_index: xtInsert failed!");
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
mp = get_index_page(ip, 0);
if (!mp) {
jfs_err("add_index: get_metapage failed!");
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
goto clean_up;
}
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (struct linelock *) & tlck->lock;
ASSERT(llck->index == 0);
lv = &llck->lv[0];
lv->offset = 0;
lv->length = 6; /* tlckDATA slot size is 16 bytes */
llck->index++;
memcpy(mp->data, temp_table, sizeof(temp_table));
mark_metapage_dirty(mp);
release_metapage(mp);
/*
* Logging is now directed by xtree tlocks
*/
clear_cflag(COMMIT_Dirtable, ip);
}
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
if (page_offset == 0) {
/*
* This will be the beginning of a new page
*/
xaddr = 0;
if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
jfs_warn("add_index: xtInsert failed!");
goto clean_up;
}
ip->i_size += PSIZE;
if ((mp = get_index_page(ip, blkno)))
memset(mp->data, 0, PSIZE); /* Just looks better */
else
xtTruncate(tid, ip, offset, COMMIT_PWMAP);
} else
mp = read_index_page(ip, blkno);
if (!mp) {
jfs_err("add_index: get/read_metapage failed!");
goto clean_up;
}
lock_index(tid, ip, mp, index);
dirtab_slot =
(struct dir_table_slot *) ((char *) mp->data + page_offset);
dirtab_slot->flag = DIR_INDEX_VALID;
dirtab_slot->slot = slot;
DTSaddress(dirtab_slot, bn);
mark_metapage_dirty(mp);
release_metapage(mp);
return index;
clean_up:
jfs_ip->next_index--;
return 0;
}
/*
* free_index()
*
* Marks an entry to the directory index table as free.
*/
static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
{
struct dir_table_slot *dirtab_slot;
s64 lblock;
struct metapage *mp = NULL;
dirtab_slot = find_index(ip, index, &mp, &lblock);
if (!dirtab_slot)
return;
dirtab_slot->flag = DIR_INDEX_FREE;
dirtab_slot->slot = dirtab_slot->addr1 = 0;
dirtab_slot->addr2 = cpu_to_le32(next);
if (mp) {
lock_index(tid, ip, mp, index);
mark_metapage_dirty(mp);
release_metapage(mp);
} else
set_cflag(COMMIT_Dirtable, ip);
}
/*
* modify_index()
*
* Changes an entry in the directory index table
*/
static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
int slot, struct metapage ** mp, s64 *lblock)
{
struct dir_table_slot *dirtab_slot;
dirtab_slot = find_index(ip, index, mp, lblock);
if (!dirtab_slot)
return;
DTSaddress(dirtab_slot, bn);
dirtab_slot->slot = slot;
if (*mp) {
lock_index(tid, ip, *mp, index);
mark_metapage_dirty(*mp);
} else
set_cflag(COMMIT_Dirtable, ip);
}
/*
* read_index()
*
* reads a directory table slot
*/
static int read_index(struct inode *ip, u32 index,
struct dir_table_slot * dirtab_slot)
{
s64 lblock;
struct metapage *mp = NULL;
struct dir_table_slot *slot;
slot = find_index(ip, index, &mp, &lblock);
if (!slot) {
return -EIO;
}
memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
if (mp)
release_metapage(mp);
return 0;
}
/*
* dtSearch()
*
* function:
* Search for the entry with specified key
*
* parameter:
*
* return: 0 - search result on stack, leaf page pinned;
* errno - I/O error
*/
int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
struct btstack * btstack, int flag)
{
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn;
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
int base, index, lim;
struct btframe *btsp;
pxd_t *pxd;
int psize = 288; /* initial in-line directory */
ino_t inumber;
struct component_name ciKey;
struct super_block *sb = ip->i_sb;
ciKey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), GFP_NOFS);
if (!ciKey.name) {
rc = -ENOMEM;
goto dtSearch_Exit2;
}
/* uppercase search key for c-i directory */
UniStrcpy(ciKey.name, key->name);
ciKey.namlen = key->namlen;
/* only uppercase if case-insensitive support is on */
if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
ciToUpper(&ciKey);
}
BT_CLR(btstack); /* reset stack */
/* init level count for max pages to split */
btstack->nsplit = 1;
/*
* search down tree from root:
*
* between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
* internal page, child page Pi contains entry with k, Ki <= K < Kj.
*
* if entry with search key K is not found
* internal page search find the entry with largest key Ki
* less than K which point to the child page to search;
* leaf page search find the entry with smallest key Kj
* greater than K so that the returned index is the position of
* the entry to be shifted right for insertion of new entry.
* for empty tree, search key is greater than any key of the tree.
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
/* get/pin the page to search */
DT_GETPAGE(ip, bn, mp, psize, p, rc);
if (rc)
goto dtSearch_Exit1;
/* get sorted entry table of the page */
stbl = DT_GETSTBL(p);
/*
* binary search with search key K on the current page.
*/
for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
index = base + (lim >> 1);
if (p->header.flag & BT_LEAF) {
/* uppercase leaf name to compare */
cmp =
ciCompare(&ciKey, p, stbl[index],
JFS_SBI(sb)->mntflag);
} else {
/* router key is in uppercase */
cmp = dtCompare(&ciKey, p, stbl[index]);
}
if (cmp == 0) {
/*
* search hit
*/
/* search hit - leaf page:
* return the entry found
*/
if (p->header.flag & BT_LEAF) {
inumber = le32_to_cpu(
((struct ldtentry *) & p->slot[stbl[index]])->inumber);
/*
* search for JFS_LOOKUP
*/
if (flag == JFS_LOOKUP) {
*data = inumber;
rc = 0;
goto out;
}
/*
* search for JFS_CREATE
*/
if (flag == JFS_CREATE) {
*data = inumber;
rc = -EEXIST;
goto out;
}
/*
* search for JFS_REMOVE or JFS_RENAME
*/
if ((flag == JFS_REMOVE ||
flag == JFS_RENAME) &&
*data != inumber) {
rc = -ESTALE;
goto out;
}
/*
* JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
*/
/* save search result */
*data = inumber;
btsp = btstack->top;
btsp->bn = bn;
btsp->index = index;
btsp->mp = mp;
rc = 0;
goto dtSearch_Exit1;
}
/* search hit - internal page:
* descend/search its child page
*/
goto getChild;
}
if (cmp > 0) {
base = index + 1;
--lim;
}
}
/*
* search miss
*
* base is the smallest index with key (Kj) greater than
* search key (K) and may be zero or (maxindex + 1) index.
*/
/*
* search miss - leaf page
*
* return location of entry (base) where new entry with
* search key K is to be inserted.
*/
if (p->header.flag & BT_LEAF) {
/*
* search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
*/
if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
flag == JFS_RENAME) {
rc = -ENOENT;
goto out;
}
/*
* search for JFS_CREATE|JFS_FINDDIR:
*
* save search result
*/
*data = 0;
btsp = btstack->top;
btsp->bn = bn;
btsp->index = base;
btsp->mp = mp;
rc = 0;
goto dtSearch_Exit1;
}
/*
* search miss - internal page
*
* if base is non-zero, decrement base by one to get the parent
* entry of the child page to search.
*/
index = base ? base - 1 : base;
/*
* go down to child page
*/
getChild:
/* update max. number of pages to split */
if (BT_STACK_FULL(btstack)) {
/* Something's corrupted, mark filesystem dirty so
* chkdsk will fix it.
*/
jfs_error(sb, "stack overrun in dtSearch!");
BT_STACK_DUMP(btstack);
rc = -EIO;
goto out;
}
btstack->nsplit++;
/* push (bn, index) of the parent page/entry */
BT_PUSH(btstack, bn, index);
/* get the child page block number */
pxd = (pxd_t *) & p->slot[stbl[index]];
bn = addressPXD(pxd);
psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the parent page */
DT_PUTPAGE(mp);
}
out:
DT_PUTPAGE(mp);
dtSearch_Exit1:
kfree(ciKey.name);
dtSearch_Exit2:
return rc;
}
/*
* dtInsert()
*
* function: insert an entry to directory tree
*
* parameter:
*
* return: 0 - success;
* errno - failure;
*/
int dtInsert(tid_t tid, struct inode *ip,
struct component_name * name, ino_t * fsn, struct btstack * btstack)
{
int rc = 0;
struct metapage *mp; /* meta-page buffer */
dtpage_t *p; /* base B+-tree index page */
s64 bn;
int index;
struct dtsplit split; /* split information */
ddata_t data;
struct dt_lock *dtlck;
int n;
struct tlock *tlck;
struct lv *lv;
/*
* retrieve search result
*
* dtSearch() returns (leaf page pinned, index at which to insert).
* n.b. dtSearch() may return index of (maxindex + 1) of
* the full page.
*/
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
/*
* insert entry for new key
*/
if (DO_INDEX(ip)) {
if (JFS_IP(ip)->next_index == DIREND) {
DT_PUTPAGE(mp);
return -EMLINK;
}
n = NDTLEAF(name->namlen);
data.leaf.tid = tid;
data.leaf.ip = ip;
} else {
n = NDTLEAF_LEGACY(name->namlen);
data.leaf.ip = NULL; /* signifies legacy directory format */
}
data.leaf.ino = *fsn;
/*
* leaf page does not have enough room for new entry:
*
* extend/split the leaf page;
*
* dtSplitUp() will insert the entry and unpin the leaf page.
*/
if (n > p->header.freecnt) {
split.mp = mp;
split.index = index;
split.nslot = n;
split.key = name;
split.data = &data;
rc = dtSplitUp(tid, ip, &split, btstack);
return rc;
}
/*
* leaf page does have enough room for new entry:
*
* insert the new data entry into the leaf page;
*/
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
dtInsertEntry(p, index, name, &data, &dtlck);
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
n = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + n;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
dtlck->index++;
}
/* unpin the leaf page */
DT_PUTPAGE(mp);
return 0;
}
/*
* dtSplitUp()
*
* function: propagate insertion bottom up;
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* leaf page unpinned;
*/
static int dtSplitUp(tid_t tid,
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int rc = 0;
struct metapage *smp;
dtpage_t *sp; /* split page */
struct metapage *rmp;
dtpage_t *rp; /* new right page split from sp */
pxd_t rpxd; /* new right page extent descriptor */
struct metapage *lmp;
dtpage_t *lp; /* left child page */
int skip; /* index of entry of insertion */
struct btframe *parent; /* parent page entry on traverse stack */
s64 xaddr, nxaddr;
int xlen, xsize;
struct pxdlist pxdlist;
pxd_t *pxd;
struct component_name key = { 0, NULL };
ddata_t *data = split->data;
int n;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int quota_allocation = 0;
/* get split page */
smp = split->mp;
sp = DT_PAGE(ip, smp);
key.name = kmalloc((JFS_NAME_MAX + 2) * sizeof(wchar_t), GFP_NOFS);
if (!key.name) {
DT_PUTPAGE(smp);
rc = -ENOMEM;
goto dtSplitUp_Exit;
}
/*
* split leaf page
*
* The split routines insert the new entry, and
* acquire txLock as appropriate.
*/
/*
* split root leaf page:
*/
if (sp->header.flag & BT_ROOT) {
/*
* allocate a single extent child page
*/
xlen = 1;
n = sbi->bsize >> L2DTSLOTSIZE;
n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
if (n <= split->nslot)
xlen++;
if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
DT_PUTPAGE(smp);
goto freeKeyName;
}
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
PXDaddress(pxd, xaddr);
PXDlength(pxd, xlen);
split->pxdlist = &pxdlist;
rc = dtSplitRoot(tid, ip, split, &rmp);
if (rc)
dbFree(ip, xaddr, xlen);
else
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
if (!DO_INDEX(ip))
ip->i_size = xlen << sbi->l2bsize;
goto freeKeyName;
}
/*
* extend first leaf page
*
* extend the 1st extent if less than buffer page size
* (dtExtendPage() returns the leaf page unpinned)
*/
pxd = &sp->header.self;
xlen = lengthPXD(pxd);
xsize = xlen << sbi->l2bsize;
if (xsize < PSIZE) {
xaddr = addressPXD(pxd);
n = xsize >> L2DTSLOTSIZE;
n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
if ((n + sp->header.freecnt) <= split->nslot)
n = xlen + (xlen << 1);
else
n = xlen;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, n);
if (rc)
goto extendOut;
quota_allocation += n;
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
(s64) n, &nxaddr)))
goto extendOut;
pxdlist.maxnpxd = 1;
pxdlist.npxd = 0;
pxd = &pxdlist.pxd[0];
PXDaddress(pxd, nxaddr);
PXDlength(pxd, xlen + n);
split->pxdlist = &pxdlist;
if ((rc = dtExtendPage(tid, ip, split, btstack))) {
nxaddr = addressPXD(pxd);
if (xaddr != nxaddr) {
/* free relocated extent */
xlen = lengthPXD(pxd);
dbFree(ip, nxaddr, (s64) xlen);
} else {
/* free extended delta */
xlen = lengthPXD(pxd) - n;
xaddr = addressPXD(pxd) + xlen;
dbFree(ip, xaddr, (s64) n);
}
} else if (!DO_INDEX(ip))
ip->i_size = lengthPXD(pxd) << sbi->l2bsize;
extendOut:
DT_PUTPAGE(smp);
goto freeKeyName;
}
/*
* split leaf page <sp> into <sp> and a new right page <rp>.
*
* return <rp> pinned and its extent descriptor <rpxd>
*/
/*
* allocate new directory page extent and
* new index page(s) to cover page split(s)
*
* allocation hint: ?
*/
n = btstack->nsplit;
pxdlist.maxnpxd = pxdlist.npxd = 0;
xlen = sbi->nbperpage;
for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
PXDaddress(pxd, xaddr);
PXDlength(pxd, xlen);
pxdlist.maxnpxd++;
continue;
}
DT_PUTPAGE(smp);
/* undo allocation */
goto splitOut;
}
split->pxdlist = &pxdlist;
if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
DT_PUTPAGE(smp);
/* undo allocation */
goto splitOut;
}
if (!DO_INDEX(ip))
ip->i_size += PSIZE;
/*
* propagate up the router entry for the leaf page just split
*
* insert a router entry for the new page into the parent page,
* propagate the insert/split up the tree by walking back the stack
* of (bn of parent page, index of child page entry in parent page)
* that were traversed during the search for the page that split.
*
* the propagation of insert/split up the tree stops if the root
* splits or the page inserted into doesn't have to split to hold
* the new entry.
*
* the parent entry for the split page remains the same, and
* a new entry is inserted at its right with the first key and
* block number of the new right page.
*
* There are a maximum of 4 pages pinned at any time:
* two children, left parent and right parent (when the parent splits).
* keep the child pages pinned while working on the parent.
* make sure that all pins are released at exit.
*/
while ((parent = BT_POP(btstack)) != NULL) {
/* parent page specified by stack frame <parent> */
/* keep current child pages (<lp>, <rp>) pinned */
lmp = smp;
lp = sp;
/*
* insert router entry in parent for new right child page <rp>
*/
/* get the parent page <sp> */
DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
if (rc) {
DT_PUTPAGE(lmp);
DT_PUTPAGE(rmp);
goto splitOut;
}
/*
* The new key entry goes ONE AFTER the index of parent entry,
* because the split was to the right.
*/
skip = parent->index + 1;
/*
* compute the key for the router entry
*
* key suffix compression:
* for internal pages that have leaf pages as children,
* retain only what's needed to distinguish between
* the new entry and the entry on the page to its left.
* If the keys compare equal, retain the entire key.
*
* note that compression is performed only at computing
* router key at the lowest internal level.
* further compression of the key between pairs of higher
* level internal pages loses too much information and
* the search may fail.
* (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
* results in two adjacent parent entries (a)(xx).
* if split occurs between these two entries, and
* if compression is applied, the router key of parent entry
* of right page (x) will divert search for x into right
* subtree and miss x in the left subtree.)
*
* the entire key must be retained for the next-to-leftmost
* internal key at any level of the tree, or search may fail
* (e.g., ?)
*/
switch (rp->header.flag & BT_TYPE) {
case BT_LEAF:
/*
* compute the length of prefix for suffix compression
* between last entry of left page and first entry
* of right page
*/
if ((sp->header.flag & BT_ROOT && skip > 1) ||
sp->header.prev != 0 || skip > 1) {
/* compute uppercase router prefix key */
rc = ciGetLeafPrefixKey(lp,
lp->header.nextindex-1,
rp, 0, &key,
sbi->mntflag);
if (rc) {
DT_PUTPAGE(lmp);
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
goto splitOut;
}
} else {
/* next to leftmost entry of
lowest internal level */
/* compute uppercase router key */
dtGetKey(rp, 0, &key, sbi->mntflag);
key.name[key.namlen] = 0;
if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
ciToUpper(&key);
}
n = NDTINTERNAL(key.namlen);
break;
case BT_INTERNAL:
dtGetKey(rp, 0, &key, sbi->mntflag);
n = NDTINTERNAL(key.namlen);
break;
default:
jfs_err("dtSplitUp(): UFO!");
break;
}
/* unpin left child page */
DT_PUTPAGE(lmp);
/*
* compute the data for the router entry
*/
data->xd = rpxd; /* child page xd */
/*
* parent page is full - split the parent page
*/
if (n > sp->header.freecnt) {
/* init for parent page split */
split->mp = smp;
split->index = skip; /* index at insert */
split->nslot = n;
split->key = &key;
/* split->data = data; */
/* unpin right child page */
DT_PUTPAGE(rmp);
/* The split routines insert the new entry,
* acquire txLock as appropriate.
* return <rp> pinned and its block number <rbn>.
*/
rc = (sp->header.flag & BT_ROOT) ?
dtSplitRoot(tid, ip, split, &rmp) :
dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
if (rc) {
DT_PUTPAGE(smp);
goto splitOut;
}
/* smp and rmp are pinned */
}
/*
* parent page is not full - insert router entry in parent page
*/
else {
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the parent page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root parent page */
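/* the stbl holds one byte per entry, so the lock covers the
* DTSLOTSIZE-byte slots spanning entries <skip> .. <nextindex - 1>
*/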
if (!(sp->header.flag & BT_ROOT)) {
lv++;
n = skip >> L2DTSLOTSIZE;
lv->offset = sp->header.stblindex + n;
lv->length =
((sp->header.nextindex -
1) >> L2DTSLOTSIZE) - n + 1;
dtlck->index++;
}
dtInsertEntry(sp, skip, &key, data, &dtlck);
/* exit propagate up */
break;
}
}
/* unpin current split and its right page */
DT_PUTPAGE(smp);
DT_PUTPAGE(rmp);
/*
* free remaining extents allocated for split
*/
splitOut:
n = pxdlist.npxd;
pxd = &pxdlist.pxd[n];
for (; n < pxdlist.maxnpxd; n++, pxd++)
dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));
freeKeyName:
kfree(key.name);
/* Rollback quota allocation */
if (rc && quota_allocation)
dquot_free_block(ip, quota_allocation);
dtSplitUp_Exit:
return rc;
}
/*
* dtSplitPage()
*
* function: Split a non-root page of a btree.
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return split and new page pinned;
*/
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
int rc = 0;
struct metapage *smp;
dtpage_t *sp;
struct metapage *rmp;
dtpage_t *rp; /* new right page allocated */
s64 rbn; /* new right page block number */
struct metapage *mp;
dtpage_t *p;
s64 nextbn;
struct pxdlist *pxdlist;
pxd_t *pxd;
int skip, nextindex, half, left, nxt, off, si;
struct ldtentry *ldtentry;
struct idtentry *idtentry;
u8 *stbl;
struct dtslot *f;
int fsi, stblsize;
int n;
struct dt_lock *sdtlck, *rdtlck;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *slv, *rlv, *lv;
/* get split page */
smp = split->mp;
sp = DT_PAGE(ip, smp);
/*
* allocate the new right page for the split
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
rmp = get_metapage(ip, rbn, PSIZE, 1);
if (rmp == NULL)
return -EIO;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return rc;
}
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
BT_MARK_DIRTY(rmp, ip);
/*
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
rdtlck = (struct dt_lock *) & tlck->lock;
rp = (dtpage_t *) rmp->data;
*rpp = rp;
rp->header.self = *pxd;
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the split page
*
* action:
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
sdtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of split page */
ASSERT(sdtlck->index == 0);
slv = & sdtlck->lv[0];
slv->offset = 0;
slv->length = 1;
sdtlck->index++;
/*
* initialize/update sibling pointers between sp and rp
*/
nextbn = le64_to_cpu(sp->header.next);
rp->header.next = cpu_to_le64(nextbn);
rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
sp->header.next = cpu_to_le64(rbn);
/*
* initialize new right page
*/
rp->header.flag = sp->header.flag;
/* compute sorted entry table at start of extent data area */
rp->header.nextindex = 0;
rp->header.stblindex = 1;
n = PSIZE >> L2DTSLOTSIZE;
rp->header.maxslot = n;
stblsize = (n + 31) >> L2DTSLOTSIZE; /* in unit of slot */
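/* the sorted entry table needs one byte per potential entry
* (maxslot of them), rounded up to a whole number of slots
*/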
/* init freelist */
fsi = rp->header.stblindex + stblsize;
rp->header.freelist = fsi;
rp->header.freecnt = rp->header.maxslot - fsi;
/*
* sequential append at tail: append without split
*
* If splitting the last page on a level because of appending
* an entry to it (skip is maxentry), it's likely that the access is
* sequential. Adding an empty page on the side of the level is less
* work and can push the fill factor much higher than normal.
* If we're wrong it's no big deal, we'll just do the split the right
* way next time.
* (It may look like it's equally easy to do a similar hack for
* reverse sorted data, that is, split the tree left,
* but it's not. Be my guest.)
*/
if (nextbn == 0 && split->index == sp->header.nextindex) {
/* linelock header + stbl (first slot) of new page */
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 2;
rdtlck->index++;
/*
* initialize freelist of new right page
*/
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* insert entry at the first entry of the new right page */
dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);
goto out;
}
/*
* non-sequential insert (at possibly middle page)
*/
/*
* update prev pointer of previous right sibling page;
*/
if (nextbn != 0) {
DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc) {
discard_metapage(rmp);
return rc;
}
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the next page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of previous right sibling page */
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.prev = cpu_to_le64(rbn);
DT_PUTPAGE(mp);
}
/*
* split the data between the split and right pages.
*/
skip = split->index;
half = (PSIZE >> L2DTSLOTSIZE) >> 1; /* swag */
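/* target roughly half the slots of a page for the left page;
* the loop below accumulates entry sizes until that fill is reached
*/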
left = 0;
/*
* compute fill factor for split pages
*
* <nxt> traces the next entry to move to rp
* <off> traces the next entry to stay in sp
*/
stbl = (u8 *) & sp->slot[sp->header.stblindex];
nextindex = sp->header.nextindex;
for (nxt = off = 0; nxt < nextindex; ++off) {
if (off == skip)
/* check for fill factor with new entry size */
n = split->nslot;
else {
si = stbl[nxt];
switch (sp->header.flag & BT_TYPE) {
case BT_LEAF:
ldtentry = (struct ldtentry *) & sp->slot[si];
if (DO_INDEX(ip))
n = NDTLEAF(ldtentry->namlen);
else
n = NDTLEAF_LEGACY(ldtentry->
namlen);
break;
case BT_INTERNAL:
idtentry = (struct idtentry *) & sp->slot[si];
n = NDTINTERNAL(idtentry->namlen);
break;
default:
break;
}
++nxt; /* advance to next entry to move in sp */
}
left += n;
if (left >= half)
break;
}
/* <nxt> points to the 1st entry to move */
/*
* move entries to right page
*
* dtMoveEntry() initializes rp and reserves entry for insertion
*
* split page moved out entries are linelocked;
* new/right page moved in entries are linelocked;
*/
/* linelock header + stbl of new right page */
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 5;
rdtlck->index++;
dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
sp->header.nextindex = nxt;
/*
* finalize freelist of new right page
*/
fsi = rp->header.freelist;
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/*
* Update directory index table for entries now in right page
*/
if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
s64 lblock;
mp = NULL;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
/*
* the skipped index was on the left page,
*/
if (skip <= off) {
/* insert the new entry in the split page */
dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);
/* linelock stbl of split page */
if (sdtlck->index >= sdtlck->maxcnt)
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[sdtlck->index];
n = skip >> L2DTSLOTSIZE;
slv->offset = sp->header.stblindex + n;
slv->length =
((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
sdtlck->index++;
}
/*
* the skipped index was on the right page,
*/
else {
/* adjust the skip index to reflect the new position */
skip -= nxt;
/* insert the new entry in the right page */
dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
}
out:
*rmpp = rmp;
*rpxdp = *pxd;
return rc;
}
/*
* dtExtendPage()
*
* function: extend 1st/only directory leaf page
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return extended page pinned;
*/
static int dtExtendPage(tid_t tid,
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct super_block *sb = ip->i_sb;
int rc;
struct metapage *smp, *pmp, *mp;
dtpage_t *sp, *pp;
struct pxdlist *pxdlist;
pxd_t *pxd, *tpxd;
int xlen, xsize;
int newstblindex, newstblsize;
int oldstblindex, oldstblsize;
int fsi, last;
struct dtslot *f;
struct btframe *parent;
int n;
struct dt_lock *dtlck;
s64 xaddr, txaddr;
struct tlock *tlck;
struct pxd_lock *pxdlock;
struct lv *lv;
uint type;
struct ldtentry *ldtentry;
u8 *stbl;
/* get page to extend */
smp = split->mp;
sp = DT_PAGE(ip, smp);
/* get parent/root page */
parent = BT_POP(btstack);
DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
if (rc)
return (rc);
/*
* extend the extent
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
xaddr = addressPXD(pxd);
tpxd = &sp->header.self;
txaddr = addressPXD(tpxd);
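/* the new extent either starts at the old address (extended in
* place) or was relocated; the two cases are handled below
*/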
/* in-place extension */
if (xaddr == txaddr) {
type = tlckEXTEND;
}
/* relocation */
else {
type = tlckNEW;
/* save moved extent descriptor for later free */
tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = sp->header.self;
pxdlock->index = 1;
/*
* Update directory index table to reflect new page address
*/
if (DO_INDEX(ip)) {
s64 lblock;
mp = NULL;
stbl = DT_GETSTBL(sp);
for (n = 0; n < sp->header.nextindex; n++) {
ldtentry =
(struct ldtentry *) & sp->slot[stbl[n]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
xaddr, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
}
/*
* extend the page
*/
sp->header.self = *pxd;
jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the extended/leaf page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | type);
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[0];
/* update buffer extent descriptor of extended page */
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
/*
* copy old stbl to new stbl at start of extended area
*/
oldstblindex = sp->header.stblindex;
oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
newstblindex = sp->header.maxslot;
n = xsize >> L2DTSLOTSIZE;
newstblsize = (n + 31) >> L2DTSLOTSIZE;
memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
sp->header.nextindex);
/*
* in-line extension: linelock old area of extended page
*/
if (type == tlckEXTEND) {
/* linelock header */
lv->offset = 0;
lv->length = 1;
dtlck->index++;
lv++;
/* linelock new stbl of extended page */
lv->offset = newstblindex;
lv->length = newstblsize;
}
/*
* relocation: linelock whole relocated area
*/
else {
lv->offset = 0;
lv->length = sp->header.maxslot + newstblsize;
}
dtlck->index++;
sp->header.maxslot = n;
sp->header.stblindex = newstblindex;
/* sp->header.nextindex remains the same */
/*
* add old stbl region at head of freelist
*/
fsi = oldstblindex;
f = &sp->slot[fsi];
last = sp->header.freelist;
for (n = 0; n < oldstblsize; n++, fsi++, f++) {
f->next = last;
last = fsi;
}
sp->header.freelist = last;
sp->header.freecnt += oldstblsize;
/*
* append free region of newly extended area at tail of freelist
*/
/* init free region of newly extended area */
fsi = n = newstblindex + newstblsize;
f = &sp->slot[fsi];
for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* append new free region at tail of old freelist */
fsi = sp->header.freelist;
if (fsi == -1)
sp->header.freelist = n;
else {
do {
f = &sp->slot[fsi];
fsi = f->next;
} while (fsi != -1);
f->next = n;
}
sp->header.freecnt += sp->header.maxslot - n;
/*
* insert the new entry
*/
dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);
BT_MARK_DIRTY(pmp, ip);
/*
* linelock any freeslots residing in old extent
*/
if (type == tlckEXTEND) {
n = sp->header.maxslot >> 2;
if (sp->header.freelist < n)
dtLinelockFreelist(sp, n, &dtlck);
}
/*
* update parent entry on the parent/root page
*/
/*
* acquire a transaction lock on the parent/root page
*/
tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[dtlck->index];
/* linelock parent entry - 1st slot */
lv->offset = 1;
lv->length = 1;
dtlck->index++;
/* update the parent pxd for page extension */
tpxd = (pxd_t *) & pp->slot[1];
*tpxd = *pxd;
DT_PUTPAGE(pmp);
return 0;
}
/*
* dtSplitRoot()
*
* function:
* split the full root page into
* original/root/split page and new right page
* i.e., root remains fixed in tree anchor (inode) and
* the root is copied to a single new right child page
* since root page << non-root page, and
* the split root page contains a single entry for the
* new right child page.
*
* parameter:
*
* return: 0 - success;
* errno - failure;
* return new page pinned;
*/
static int dtSplitRoot(tid_t tid,
struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
{
struct super_block *sb = ip->i_sb;
struct metapage *smp;
dtroot_t *sp;
struct metapage *rmp;
dtpage_t *rp;
s64 rbn;
int xlen;
int xsize;
struct dtslot *f;
s8 *stbl;
int fsi, stblsize, n;
struct idtentry *s;
pxd_t *ppxd;
struct pxdlist *pxdlist;
pxd_t *pxd;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int rc;
/* get split root page */
smp = split->mp;
sp = &JFS_IP(ip)->i_dtroot;
/*
* allocate/initialize a single (right) child page
*
* N.B. at the first split, an extent of one (or two) blocks, just big
* enough to fit the new entry, is allocated; at subsequent splits,
* a full page is allocated;
*/
pxdlist = split->pxdlist;
pxd = &pxdlist->pxd[pxdlist->npxd];
pxdlist->npxd++;
rbn = addressPXD(pxd);
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
rmp = get_metapage(ip, rbn, xsize, 1);
if (!rmp)
return -EIO;
rp = rmp->data;
/* Allocate blocks to quota. */
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return rc;
}
BT_MARK_DIRTY(rmp, ip);
/*
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
dtlck = (struct dt_lock *) & tlck->lock;
rp->header.flag =
(sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
rp->header.self = *pxd;
/* initialize sibling pointers */
rp->header.next = 0;
rp->header.prev = 0;
/*
* move in-line root page into new right page extent
*/
/* linelock header + copied entries + new stbl (1st slot) in new page */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 10; /* 1 + 8 + 1 */
dtlck->index++;
n = xsize >> L2DTSLOTSIZE;
rp->header.maxslot = n;
stblsize = (n + 31) >> L2DTSLOTSIZE;
/* copy old stbl to new stbl at start of extended area */
rp->header.stblindex = DTROOTMAXSLOT;
stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
memcpy(stbl, sp->header.stbl, sp->header.nextindex);
rp->header.nextindex = sp->header.nextindex;
/* copy old data area to start of new data area */
memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);
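/* note: the copied in-line data area (IDATASIZE) spans the 8 slots
* counted in the "1 + 8 + 1" linelock above, between the new page
* header and the first stbl slot
*/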
/*
* append free region of newly extended area at tail of freelist
*/
/* init free region of newly extended area */
fsi = n = DTROOTMAXSLOT + stblsize;
f = &rp->slot[fsi];
for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
f->next = fsi;
f->next = -1;
/* append new free region at tail of old freelist */
fsi = sp->header.freelist;
if (fsi == -1)
rp->header.freelist = n;
else {
rp->header.freelist = fsi;
do {
f = &rp->slot[fsi];
fsi = f->next;
} while (fsi != -1);
f->next = n;
}
rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;
/*
* Update directory index table for entries now in right page
*/
if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
s64 lblock;
struct metapage *mp = NULL;
struct ldtentry *ldtentry;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp, &lblock);
}
if (mp)
release_metapage(mp);
}
/*
* insert the new entry into the new right/child page
* (skip index in the new right page will not change)
*/
dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
/*
* reset parent/root page
*
* set the 1st entry offset to 0, which forces the left-most key
* at any level of the tree to be less than any search key.
*
* The btree comparison code guarantees that the left-most key on any
* level of the tree is never used, so it doesn't need to be filled in.
*/
BT_MARK_DIRTY(smp, ip);
/*
* acquire a transaction lock on the root page (in-memory inode)
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
/* update page header of root */
if (sp->header.flag & BT_LEAF) {
sp->header.flag &= ~BT_LEAF;
sp->header.flag |= BT_INTERNAL;
}
/* init the first entry */
s = (struct idtentry *) & sp->slot[DTENTRYSTART];
ppxd = (pxd_t *) s;
*ppxd = *pxd;
s->next = -1;
s->namlen = 0;
stbl = sp->header.stbl;
stbl[0] = DTENTRYSTART;
sp->header.nextindex = 1;
/* init freelist */
fsi = DTENTRYSTART + 1;
f = &sp->slot[fsi];
/* init free region of remaining area */
for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
f->next = fsi;
f->next = -1;
sp->header.freelist = DTENTRYSTART + 1;
sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);
*rmpp = rmp;
return 0;
}
/*
* dtDelete()
*
* function: delete the entry(s) referenced by a key.
*
* parameter:
*
* return:
*/
int dtDelete(tid_t tid,
struct inode *ip, struct component_name * key, ino_t * ino, int flag)
{
int rc = 0;
s64 bn;
struct metapage *mp, *imp;
dtpage_t *p;
int index;
struct btstack btstack;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int i;
struct ldtentry *ldtentry;
u8 *stbl;
u32 table_index, next_index;
struct metapage *nmp;
dtpage_t *np;
/*
* search for the entry to delete:
*
* dtSearch() returns (leaf page pinned, index at which to delete).
*/
if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
return rc;
/* retrieve search result */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/*
* We need to put the index of the next entry into the
* directory index table in order to resume a readdir from this
* entry.
*/
if (DO_INDEX(ip)) {
stbl = DT_GETSTBL(p);
ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
table_index = le32_to_cpu(ldtentry->index);
if (index == (p->header.nextindex - 1)) {
/*
* Last entry in this leaf page
*/
if ((p->header.flag & BT_ROOT)
|| (p->header.next == 0))
next_index = -1;
else {
/* Read next leaf page */
DT_GETPAGE(ip, le64_to_cpu(p->header.next),
nmp, PSIZE, np, rc);
if (rc)
next_index = -1;
else {
stbl = DT_GETSTBL(np);
ldtentry =
(struct ldtentry *) & np->
slot[stbl[0]];
next_index =
le32_to_cpu(ldtentry->index);
DT_PUTPAGE(nmp);
}
}
} else {
ldtentry =
(struct ldtentry *) & p->slot[stbl[index + 1]];
next_index = le32_to_cpu(ldtentry->index);
}
free_index(tid, ip, table_index, next_index);
}
/*
* the leaf page becomes empty, delete the page
*/
if (p->header.nextindex == 1) {
/* delete empty page */
rc = dtDeleteUp(tid, ip, mp, p, &btstack);
}
/*
* the leaf page has other entries remaining:
*
* delete the entry from the leaf page.
*/
else {
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/*
* Do not assume that dtlck->index will be zero. During a
* rename within a directory, this transaction may have
* modified this page already when adding the new entry.
*/
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
i + 1;
dtlck->index++;
}
/* free the leaf entry */
dtDeleteEntry(p, index, &dtlck);
/*
* Update directory index table for entries moved in stbl
*/
if (DO_INDEX(ip) && index < p->header.nextindex) {
s64 lblock;
imp = NULL;
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
ldtentry =
(struct ldtentry *) & p->slot[stbl[i]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
bn, i, &imp, &lblock);
}
if (imp)
release_metapage(imp);
}
DT_PUTPAGE(mp);
}
return rc;
}
/*
* dtDeleteUp()
*
* function:
* free empty pages while propagating deletion up the tree
*
* parameter:
*
* return:
*/
static int dtDeleteUp(tid_t tid, struct inode *ip,
struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
{
int rc = 0;
struct metapage *mp;
dtpage_t *p;
int index, nextindex;
int xlen;
struct btframe *parent;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
struct pxd_lock *pxdlock;
int i;
/*
* keep the root leaf page which has become empty
*/
if (BT_IS_ROOT(fmp)) {
/*
* reset the root
*
* dtInitRoot() acquires txlock on the root
*/
dtInitRoot(tid, ip, PARENT(ip));
DT_PUTPAGE(fmp);
return 0;
}
/*
* free the non-root leaf page
*/
/*
* acquire a transaction lock on the page
*
* write FREEXTENT|NOREDOPAGE log record
* N.B. linelock is overlaid as freed extent descriptor, and
* the buffer page is freed;
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = fp->header.self;
pxdlock->index = 1;
/* update sibling pointers */
if ((rc = dtRelink(tid, ip, fp))) {
BT_PUTPAGE(fmp);
return rc;
}
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
/*
* propagate page deletion up the directory tree
*
* If the delete from the parent page makes it empty,
* continue all the way up the tree.
* stop if the root page is reached (which is never deleted) or
* if the entry deletion does not empty the page.
*/
while ((parent = BT_POP(btstack)) != NULL) {
/* pin the parent page <sp> */
DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* free the extent of the child page deleted
*/
index = parent->index;
/*
* delete the entry for the child page from parent
*/
nextindex = p->header.nextindex;
/*
* the parent has the single entry being deleted:
*
* free the parent page which has become empty.
*/
if (nextindex == 1) {
/*
* keep the root internal page which has become empty
*/
if (p->header.flag & BT_ROOT) {
/*
* reset the root
*
* dtInitRoot() acquires txlock on the root
*/
dtInitRoot(tid, ip, PARENT(ip));
DT_PUTPAGE(mp);
return 0;
}
/*
* free the parent page
*/
else {
/*
* acquire a transaction lock on the page
*
* write FREEXTENT|NOREDOPAGE log record
*/
tlck =
txMaplock(tid, ip,
tlckDTREE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = p->header.self;
pxdlock->index = 1;
/* update sibling pointers */
if ((rc = dtRelink(tid, ip, p))) {
DT_PUTPAGE(mp);
return rc;
}
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
/* propagate up */
continue;
}
}
/*
* the parent has other entries remaining:
*
* delete the router entry from the parent page.
*/
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the page
*
* action: router entry deletion
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
lv->length =
((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
i + 1;
dtlck->index++;
}
/* free the router entry */
dtDeleteEntry(p, index, &dtlck);
/* reset key of new leftmost entry of level (for consistency) */
if (index == 0 &&
((p->header.flag & BT_ROOT) || p->header.prev == 0))
dtTruncateEntry(p, 0, &dtlck);
/* unpin the parent page */
DT_PUTPAGE(mp);
/* exit propagation up */
break;
}
if (!DO_INDEX(ip))
ip->i_size -= PSIZE;
return 0;
}
#ifdef _NOTYET
/*
* NAME: dtRelocate()
*
* FUNCTION: relocate dtpage (internal or leaf) of directory;
* This function is mainly used by defragfs utility.
*/
int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
s64 nxaddr)
{
int rc = 0;
struct metapage *mp, *pmp, *lmp, *rmp;
dtpage_t *p, *pp, *rp = 0, *lp = 0;
s64 bn;
int index;
struct btstack btstack;
pxd_t *pxd;
s64 oxaddr, nextbn, prevbn;
int xlen, xsize;
struct tlock *tlck;
struct dt_lock *dtlck;
struct pxd_lock *pxdlock;
s8 *stbl;
struct lv *lv;
oxaddr = addressPXD(opxd);
xlen = lengthPXD(opxd);
jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
(long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
xlen);
/*
* 1. get the internal parent dtpage covering
* router entry for the target page to be relocated;
*/
rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
if (rc)
return rc;
/* retrieve search result */
DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
jfs_info("dtRelocate: parent router entry validated.");
/*
* 2. relocate the target dtpage
*/
/* read in the target page from src extent */
DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
if (rc) {
/* release the pinned parent page */
DT_PUTPAGE(pmp);
return rc;
}
/*
* read in sibling pages if any to update sibling pointers;
*/
rmp = NULL;
if (p->header.next) {
nextbn = le64_to_cpu(p->header.next);
DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
if (rc) {
DT_PUTPAGE(mp);
DT_PUTPAGE(pmp);
return (rc);
}
}
lmp = NULL;
if (p->header.prev) {
prevbn = le64_to_cpu(p->header.prev);
DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
if (rc) {
DT_PUTPAGE(mp);
DT_PUTPAGE(pmp);
if (rmp)
DT_PUTPAGE(rmp);
return (rc);
}
}
/* at this point, all dtpages to be updated are in memory */
/*
* update sibling pointers of sibling dtpages if any;
*/
if (lmp) {
tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
lp->header.next = cpu_to_le64(nxaddr);
DT_PUTPAGE(lmp);
}
if (rmp) {
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
rp->header.prev = cpu_to_le64(nxaddr);
DT_PUTPAGE(rmp);
}
/*
* update the target dtpage to be relocated
*
* write LOG_REDOPAGE of LOG_NEW type for dst page
* for the whole target page (logredo() will apply
* after image and update bmap for allocation of the
* dst extent), and update bmap for allocation of
* the dst extent;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
/* update the self address in the dtpage header */
pxd = &p->header.self;
PXDaddress(pxd, nxaddr);
/* the dst page is the same as the src page, i.e.,
* linelock for afterimage of the whole page;
*/
lv->offset = 0;
lv->length = p->header.maxslot;
dtlck->index++;
/* update the buffer extent descriptor of the dtpage */
xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the relocated page */
DT_PUTPAGE(mp);
jfs_info("dtRelocate: target dtpage relocated.");
/* since the moved extent is a dtpage, a LOG_NOREDOPAGE log rec
* needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
* will also force a bmap update ).
*/
/*
* 3. acquire maplock for the source extent to be freed;
*/
/* for dtpage relocation, write a LOG_NOREDOPAGE record
* for the source dtpage (logredo() will init NoRedoPage
* filter and will also update bmap for free of the source
* dtpage), and update bmap for free of the source dtpage;
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, oxaddr);
PXDlength(&pxdlock->pxd, xlen);
pxdlock->index = 1;
/*
* 4. update the parent router entry for relocation;
*
* acquire tlck for the parent entry covering the target dtpage;
* write LOG_REDOPAGE to apply after image only;
*/
jfs_info("dtRelocate: update parent router entry.");
tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[dtlck->index];
/* update the PXD with the new address */
stbl = DT_GETSTBL(pp);
pxd = (pxd_t *) & pp->slot[stbl[index]];
PXDaddress(pxd, nxaddr);
lv->offset = stbl[index];
lv->length = 1;
dtlck->index++;
/* unpin the parent dtpage */
DT_PUTPAGE(pmp);
return rc;
}
/*
* NAME: dtSearchNode()
*
* FUNCTION: Search for a dtpage containing a specified address
* This function is mainly used by defragfs utility.
*
* NOTE: Search result on stack, the found page is pinned at exit.
* The result page must be an internal dtpage.
* lmxaddr gives the address of the leftmost page of the
* dtree level, in which the required dtpage resides.
*/
static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
struct btstack * btstack)
{
int rc = 0;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int psize = 288; /* initial in-line directory */
s8 *stbl;
int i;
pxd_t *pxd;
struct btframe *btsp;
BT_CLR(btstack); /* reset stack */
/*
* descend tree to the level with specified leftmost page
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
/* get/pin the page to search */
DT_GETPAGE(ip, bn, mp, psize, p, rc);
if (rc)
return rc;
/* does the xaddr of the leftmost page of the level
* match the level search key ?
*/
if (p->header.flag & BT_ROOT) {
if (lmxaddr == 0)
break;
} else if (addressPXD(&p->header.self) == lmxaddr)
break;
/*
* descend down to leftmost child page
*/
if (p->header.flag & BT_LEAF) {
DT_PUTPAGE(mp);
return -ESTALE;
}
/* get the leftmost entry */
stbl = DT_GETSTBL(p);
pxd = (pxd_t *) & p->slot[stbl[0]];
/* get the child page block address */
bn = addressPXD(pxd);
psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the parent page */
DT_PUTPAGE(mp);
}
/*
* search each page at the current level
*/
loop:
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
pxd = (pxd_t *) & p->slot[stbl[i]];
/* found the specified router entry */
if (addressPXD(pxd) == addressPXD(kpxd) &&
lengthPXD(pxd) == lengthPXD(kpxd)) {
btsp = btstack->top;
btsp->bn = bn;
btsp->index = i;
btsp->mp = mp;
return 0;
}
}
/* get the right sibling page if any */
if (p->header.next)
bn = le64_to_cpu(p->header.next);
else {
DT_PUTPAGE(mp);
return -ESTALE;
}
/* unpin current page */
DT_PUTPAGE(mp);
/* get the right sibling page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
goto loop;
}
#endif /* _NOTYET */
/*
* dtRelink()
*
* function:
* link around a freed page.
*
* parameter:
* p: page to be freed
*
* return:
*/
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
{
int rc;
struct metapage *mp;
s64 nextbn, prevbn;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
nextbn = le64_to_cpu(p->header.next);
prevbn = le64_to_cpu(p->header.prev);
/* update prev pointer of the next page */
if (nextbn != 0) {
DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the next page
*
* action: update prev pointer;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.prev = cpu_to_le64(prevbn);
DT_PUTPAGE(mp);
}
/* update next pointer of the previous page */
if (prevbn != 0) {
DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
if (rc)
return rc;
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the prev page
*
* action: update next pointer;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
tlck, ip, mp);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
p->header.next = cpu_to_le64(nextbn);
DT_PUTPAGE(mp);
}
return 0;
}
/*
* dtInitRoot()
*
* initialize directory root (inline in inode)
*/
void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
dtroot_t *p;
int fsi;
struct dtslot *f;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
u16 xflag_save;
/*
* If this was previously a non-empty directory, we need to remove
* the old directory table.
*/
if (DO_INDEX(ip)) {
if (!jfs_dirtable_inline(ip)) {
struct tblock *tblk = tid_to_tblock(tid);
/*
* We're playing games with the tid's xflag. If
* we're removing a regular file, the file's xtree
* is committed with COMMIT_PMAP, but we always
* commit the directory's xtree with COMMIT_PWMAP.
*/
xflag_save = tblk->xflag;
tblk->xflag = 0;
/*
* xtTruncate isn't guaranteed to fully truncate
* the xtree. The caller needs to check i_size
* after committing the transaction to see if
* additional truncation is needed. The
* COMMIT_Stale flag tells the caller that we
* initiated the truncation.
*/
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
set_cflag(COMMIT_Stale, ip);
tblk->xflag = xflag_save;
} else
ip->i_size = 1;
jfs_ip->next_index = 2;
} else
ip->i_size = IDATASIZE;
/*
* acquire a transaction lock on the root
*
* action: directory initialization;
*/
tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
tlckDTREE | tlckENTRY | tlckBTROOT);
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
p = &jfs_ip->i_dtroot;
p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
p->header.nextindex = 0;
/* init freelist */
fsi = 1;
f = &p->slot[fsi];
/* init data area of root */
for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
f->next = fsi;
f->next = -1;
p->header.freelist = 1;
p->header.freecnt = 8;
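/* the header overlays slot 0; slots 1 .. DTROOTMAXSLOT - 1 of the
* in-line root form the free list chained above
*/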
/* init '..' entry */
p->header.idotdot = cpu_to_le32(idotdot);
return;
}
/*
* add_missing_indices()
*
* function: Fix a dtree page in which one or more entries have an invalid index.
* fsck.jfs should really fix this, but it currently does not.
* Called from jfs_readdir when bad index is detected.
*/
static void add_missing_indices(struct inode *inode, s64 bn)
{
struct ldtentry *d;
struct dt_lock *dtlck;
int i;
uint index;
struct lv *lv;
struct metapage *mp;
dtpage_t *p;
int rc;
s8 *stbl;
tid_t tid;
struct tlock *tlck;
tid = txBegin(inode->i_sb, 0);
DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);
if (rc) {
printk(KERN_ERR "DT_GETPAGE failed!\n");
goto end;
}
BT_MARK_DIRTY(mp, inode);
ASSERT(p->header.flag & BT_LEAF);
tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
if (BT_IS_ROOT(mp))
tlck->type |= tlckBTROOT;
dtlck = (struct dt_lock *) &tlck->lock;
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
d = (struct ldtentry *) &p->slot[stbl[i]];
index = le32_to_cpu(d->index);
if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
d->index = cpu_to_le32(add_index(tid, inode, bn, i));
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = &dtlck->lv[dtlck->index];
lv->offset = stbl[i];
lv->length = 1;
dtlck->index++;
}
}
DT_PUTPAGE(mp);
(void) txCommit(tid, 1, &inode, 0);
end:
txEnd(tid);
}
/*
* Buffer to hold directory entry info while traversing a dtree page
* before being fed to the filldir function
*/
struct jfs_dirent {
loff_t position;
int ino;
u16 name_len;
char name[0];
};
/*
* function to determine next variable-sized jfs_dirent in buffer
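*
* each entry is padded so that the next one starts on a loff_t
* boundary: round (header + name + NUL) up to a multiple of
* sizeof(loff_t)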
*/
static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
{
return (struct jfs_dirent *)
((char *)dirent +
((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
sizeof (loff_t) - 1) &
~(sizeof (loff_t) - 1)));
}
/*
* jfs_readdir()
*
* function: read directory entries sequentially
* from the specified entry offset
*
* parameter:
*
* return: offset = (pn, index) of start entry
* of next jfs_readdir()/dtRead()
*/
int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct inode *ip = filp->f_path.dentry->d_inode;
struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
int rc = 0;
loff_t dtpos; /* legacy OS/2 style position */
struct dtoffset {
s16 pn;
s16 index;
s32 unused;
} *dtoffset = (struct dtoffset *) &dtpos;
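/* dtoffset overlays dtpos (the local copy of f_pos) so a legacy,
* non-indexed position decodes as (page number, entry index)
*/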
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
s8 *stbl;
struct btstack btstack;
int i, next;
struct ldtentry *d;
struct dtslot *t;
int d_namleft, len, outlen;
unsigned long dirent_buf;
char *name_ptr;
u32 dir_index;
int do_index = 0;
uint loop_count = 0;
struct jfs_dirent *jfs_dirent;
int jfs_dirents;
int overflow, fix_page, page_fixed = 0;
static int unique_pos = 2; /* If we can't fix broken index */
if (filp->f_pos == DIREND)
return 0;
if (DO_INDEX(ip)) {
/*
* persistent index is stored in directory entries.
* Special cases: 0 = .
* 1 = ..
* -1 = End of directory
*/
do_index = 1;
dir_index = (u32) filp->f_pos;
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
if (dtEmpty(ip) ||
(dir_index >= JFS_IP(ip)->next_index)) {
/* Stale position. Directory has shrunk */
filp->f_pos = DIREND;
return 0;
}
repeat:
rc = read_index(ip, dir_index, &dirtab_slot);
if (rc) {
filp->f_pos = DIREND;
return rc;
}
if (dirtab_slot.flag == DIR_INDEX_FREE) {
if (loop_count++ > JFS_IP(ip)->next_index) {
jfs_err("jfs_readdir detected "
"infinite loop!");
filp->f_pos = DIREND;
return 0;
}
dir_index = le32_to_cpu(dirtab_slot.addr2);
if (dir_index == -1) {
filp->f_pos = DIREND;
return 0;
}
goto repeat;
}
bn = addressDTS(&dirtab_slot);
index = dirtab_slot.slot;
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
filp->f_pos = DIREND;
return 0;
}
if (p->header.flag & BT_INTERNAL) {
jfs_err("jfs_readdir: bad index table");
DT_PUTPAGE(mp);
filp->f_pos = -1;
return 0;
}
} else {
if (dir_index == 0) {
/*
* self "."
*/
filp->f_pos = 0;
if (filldir(dirent, ".", 1, 0, ip->i_ino,
DT_DIR))
return 0;
}
/*
* parent ".."
*/
filp->f_pos = 1;
if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
return 0;
/*
* Find first entry of left-most leaf
*/
if (dtEmpty(ip)) {
filp->f_pos = DIREND;
return 0;
}
if ((rc = dtReadFirst(ip, &btstack)))
return rc;
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
}
} else {
/*
* Legacy filesystem - OS/2 & Linux JFS < 0.3.6
*
* pn = index = 0: First entry "."
* pn = 0; index = 1: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
dtpos = filp->f_pos;
if (dtpos == 0) {
/* build "." entry */
if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
DT_DIR))
return 0;
dtoffset->index = 1;
filp->f_pos = dtpos;
}
if (dtoffset->pn == 0) {
if (dtoffset->index == 1) {
/* build ".." entry */
if (filldir(dirent, "..", 2, filp->f_pos,
PARENT(ip), DT_DIR))
return 0;
} else {
jfs_err("jfs_readdir called with "
"invalid offset!");
}
dtoffset->pn = 1;
dtoffset->index = 0;
filp->f_pos = dtpos;
}
if (dtEmpty(ip)) {
filp->f_pos = DIREND;
return 0;
}
if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
jfs_err("jfs_readdir: unexpected rc = %d "
"from dtReadNext", rc);
filp->f_pos = DIREND;
return 0;
}
/* get start leaf page and index */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
/* offset beyond directory eof ? */
if (bn < 0) {
filp->f_pos = DIREND;
return 0;
}
}
dirent_buf = __get_free_page(GFP_KERNEL);
if (dirent_buf == 0) {
DT_PUTPAGE(mp);
jfs_warn("jfs_readdir: __get_free_page failed!");
filp->f_pos = DIREND;
return -ENOMEM;
}
while (1) {
jfs_dirent = (struct jfs_dirent *) dirent_buf;
jfs_dirents = 0;
overflow = fix_page = 0;
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
d = (struct ldtentry *) & p->slot[stbl[i]];
if (((long) jfs_dirent + d->namlen + 1) >
(dirent_buf + PAGE_SIZE)) {
/* DBCS codepages could overrun dirent_buf */
index = i;
overflow = 1;
break;
}
d_namleft = d->namlen;
name_ptr = jfs_dirent->name;
jfs_dirent->ino = le32_to_cpu(d->inumber);
if (do_index) {
len = min(d_namleft, DTLHDRDATALEN);
jfs_dirent->position = le32_to_cpu(d->index);
/*
* d->index should always be valid, but it
* isn't. fsck.jfs doesn't create the
* directory index for the lost+found
* directory. Rather than let it go,
* we can try to fix it.
*/
if ((jfs_dirent->position < 2) ||
(jfs_dirent->position >=
JFS_IP(ip)->next_index)) {
if (!page_fixed && !isReadOnly(ip)) {
fix_page = 1;
/*
* setting overflow and setting
* index to i will cause the
* same page to be processed
* again starting here
*/
overflow = 1;
index = i;
break;
}
jfs_dirent->position = unique_pos++;
}
} else {
jfs_dirent->position = dtpos;
len = min(d_namleft, DTLHDRDATALEN_LEGACY);
}
/* copy the name of head/only segment */
outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
codepage);
jfs_dirent->name_len = outlen;
/* copy name in the additional segment(s) */
next = d->next;
while (next >= 0) {
t = (struct dtslot *) & p->slot[next];
name_ptr += outlen;
d_namleft -= len;
/* Sanity Check */
if (d_namleft == 0) {
jfs_error(ip->i_sb,
"JFS:Dtree error: ino = "
"%ld, bn=%Ld, index = %d",
(long)ip->i_ino,
(long long)bn,
i);
goto skip_one;
}
len = min(d_namleft, DTSLOTDATALEN);
outlen = jfs_strfromUCS_le(name_ptr, t->name,
len, codepage);
jfs_dirent->name_len += outlen;
next = t->next;
}
jfs_dirents++;
jfs_dirent = next_jfs_dirent(jfs_dirent);
skip_one:
if (!do_index)
dtoffset->index++;
}
if (!overflow) {
/* Point to next leaf page */
if (p->header.flag & BT_ROOT)
bn = 0;
else {
bn = le64_to_cpu(p->header.next);
index = 0;
/* update offset (pn:index) for new page */
if (!do_index) {
dtoffset->pn++;
dtoffset->index = 0;
}
}
page_fixed = 0;
}
/* unpin previous leaf page */
DT_PUTPAGE(mp);
jfs_dirent = (struct jfs_dirent *) dirent_buf;
while (jfs_dirents--) {
filp->f_pos = jfs_dirent->position;
if (filldir(dirent, jfs_dirent->name,
jfs_dirent->name_len, filp->f_pos,
jfs_dirent->ino, DT_UNKNOWN))
goto out;
jfs_dirent = next_jfs_dirent(jfs_dirent);
}
if (fix_page) {
add_missing_indices(ip, bn);
page_fixed = 1;
}
if (!overflow && (bn == 0)) {
filp->f_pos = DIREND;
break;
}
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
free_page(dirent_buf);
return rc;
}
}
out:
free_page(dirent_buf);
return rc;
}
/*
* dtReadFirst()
*
* function: get the leftmost page of the directory
*/
static int dtReadFirst(struct inode *ip, struct btstack * btstack)
{
int rc = 0;
s64 bn;
int psize = 288; /* initial in-line directory */
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
struct btframe *btsp;
pxd_t *xd;
BT_CLR(btstack); /* reset stack */
/*
* descend leftmost path of the tree
*
* by convention, root bn = 0.
*/
for (bn = 0;;) {
DT_GETPAGE(ip, bn, mp, psize, p, rc);
if (rc)
return rc;
/*
* leftmost leaf page
*/
if (p->header.flag & BT_LEAF) {
/* return leftmost entry */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = 0;
btsp->mp = mp;
return 0;
}
/*
* descend down to leftmost child page
*/
if (BT_STACK_FULL(btstack)) {
DT_PUTPAGE(mp);
jfs_error(ip->i_sb, "dtReadFirst: btstack overrun");
BT_STACK_DUMP(btstack);
return -EIO;
}
/* push (bn, index) of the parent page/entry */
BT_PUSH(btstack, bn, 0);
/* get the leftmost entry */
stbl = DT_GETSTBL(p);
xd = (pxd_t *) & p->slot[stbl[0]];
/* get the child page block address */
bn = addressPXD(xd);
psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize;
/* unpin the parent page */
DT_PUTPAGE(mp);
}
}
/*
* dtReadNext()
*
* function: get the page of the specified offset (pn:index)
*
* return: if (offset > eof), bn = -1;
*
* note: if index > nextindex of the target leaf page,
* start with 1st entry of next leaf page;
*/
static int dtReadNext(struct inode *ip, loff_t * offset,
struct btstack * btstack)
{
int rc = 0;
struct dtoffset {
s16 pn;
s16 index;
s32 unused;
} *dtoffset = (struct dtoffset *) offset;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
int pn;
s8 *stbl;
struct btframe *btsp, *parent;
pxd_t *xd;
/*
* get leftmost leaf page pinned
*/
if ((rc = dtReadFirst(ip, btstack)))
return rc;
/* get leaf page */
DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
/* get the start offset (pn:index) */
pn = dtoffset->pn - 1; /* Now pn = 0 represents leftmost leaf */
index = dtoffset->index;
/* start at leftmost page ? */
if (pn == 0) {
/* offset beyond eof ? */
if (index < p->header.nextindex)
goto out;
if (p->header.flag & BT_ROOT) {
bn = -1;
goto out;
}
/* start with 1st entry of next leaf page */
dtoffset->pn++;
dtoffset->index = index = 0;
goto a;
}
/* start at non-leftmost page: scan parent pages for large pn */
if (p->header.flag & BT_ROOT) {
bn = -1;
goto out;
}
/* start after next leaf page ? */
if (pn > 1)
goto b;
/* get leaf page pn = 1 */
a:
bn = le64_to_cpu(p->header.next);
/* unpin leaf page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
goto c;
/*
* scan last internal page level to get target leaf page
*/
b:
/* unpin leftmost leaf page */
DT_PUTPAGE(mp);
/* get left most parent page */
btsp = btstack->top;
parent = btsp - 1;
bn = parent->bn;
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* scan parent pages at last internal page level */
while (pn >= p->header.nextindex) {
pn -= p->header.nextindex;
/* get next parent page address */
bn = le64_to_cpu(p->header.next);
/* unpin current parent page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
/* get next parent page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* update parent page stack frame */
parent->bn = bn;
}
/* get leaf page address */
stbl = DT_GETSTBL(p);
xd = (pxd_t *) & p->slot[stbl[pn]];
bn = addressPXD(xd);
/* unpin parent page */
DT_PUTPAGE(mp);
/*
* get target leaf page
*/
c:
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/*
* leaf page has been completed:
* start with 1st entry of next leaf page
*/
if (index >= p->header.nextindex) {
bn = le64_to_cpu(p->header.next);
/* unpin leaf page */
DT_PUTPAGE(mp);
/* offset beyond eof ? */
if (bn == 0) {
bn = -1;
goto out;
}
/* get next leaf page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc)
return rc;
/* start with 1st entry of next leaf page */
dtoffset->pn++;
dtoffset->index = 0;
}
out:
/* return target leaf page pinned */
btsp = btstack->top;
btsp->bn = bn;
btsp->index = dtoffset->index;
btsp->mp = mp;
return 0;
}
/*
* dtCompare()
*
* function: compare search key with an internal entry
*
* return:
* < 0 if k is < record
* = 0 if k is = record
* > 0 if k is > record
*/
static int dtCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si)
{ /* entry slot index */
wchar_t *kname;
__le16 *name;
int klen, namlen, len, rc;
struct idtentry *ih;
struct dtslot *t;
/*
* force the left-most key on internal pages, at any level of
* the tree, to be less than any search key.
* this obviates having to update the leftmost key on an internal
* page when the user inserts a new key in the tree smaller than
* anything that has been stored.
*
* (? if/when dtSearch() narrows down to 1st entry (index = 0),
* at any internal page at any level of the tree,
* it descends to child of the entry anyway -
* ? make the entry as min size dummy entry)
*
* if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
* return (1);
*/
kname = key->name;
klen = key->namlen;
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
len = min(namlen, DTIHDRDATALEN);
/* compare with head/only segment */
len = min(klen, len);
if ((rc = UniStrncmp_le(kname, name, len)))
return rc;
klen -= len;
namlen -= len;
/* compare with additional segment(s) */
kname += len;
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
if ((rc = UniStrncmp_le(kname, name, len)))
return rc;
klen -= len;
namlen -= len;
kname += len;
si = t->next;
}
return (klen - namlen);
}
/*
* ciCompare()
*
* function: compare search key with an (leaf/internal) entry
*
* return:
* < 0 if k is < record
* = 0 if k is = record
* > 0 if k is > record
*/
static int ciCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si, /* entry slot index */
int flag)
{
wchar_t *kname, x;
__le16 *name;
int klen, namlen, len, rc;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int i;
/*
* force the left-most key on internal pages, at any level of
* the tree, to be less than any search key.
* this obviates having to update the leftmost key on an internal
* page when the user inserts a new key in the tree smaller than
* anything that has been stored.
*
* (? if/when dtSearch() narrows down to 1st entry (index = 0),
* at any internal page at any level of the tree,
* it descends to child of the entry anyway -
* ? make the entry as min size dummy entry)
*
* if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
* return (1);
*/
kname = key->name;
klen = key->namlen;
/*
* leaf page entry
*/
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
name = lh->name;
namlen = lh->namlen;
if (flag & JFS_DIR_INDEX)
len = min(namlen, DTLHDRDATALEN);
else
len = min(namlen, DTLHDRDATALEN_LEGACY);
}
/*
* internal page entry
*/
else {
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
len = min(namlen, DTIHDRDATALEN);
}
/* compare with head/only segment */
len = min(klen, len);
for (i = 0; i < len; i++, kname++, name++) {
/* only uppercase if case-insensitive support is on */
if ((flag & JFS_OS2) == JFS_OS2)
x = UniToupper(le16_to_cpu(*name));
else
x = le16_to_cpu(*name);
if ((rc = *kname - x))
return rc;
}
klen -= len;
namlen -= len;
/* compare with additional segment(s) */
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
for (i = 0; i < len; i++, kname++, name++) {
/* only uppercase if case-insensitive support is on */
if ((flag & JFS_OS2) == JFS_OS2)
x = UniToupper(le16_to_cpu(*name));
else
x = le16_to_cpu(*name);
if ((rc = *kname - x))
return rc;
}
klen -= len;
namlen -= len;
si = t->next;
}
return (klen - namlen);
}
/*
* ciGetLeafPrefixKey()
*
* function: compute prefix of suffix compression
* from two adjacent leaf entries
* across page boundary
*
* return: non-zero on error
*
*/
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag)
{
int klen, namlen;
wchar_t *pl, *pr, *kname;
struct component_name lkey;
struct component_name rkey;
lkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
GFP_KERNEL);
if (lkey.name == NULL)
return -ENOMEM;
rkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
GFP_KERNEL);
if (rkey.name == NULL) {
kfree(lkey.name);
return -ENOMEM;
}
/* get left and right key */
dtGetKey(lp, li, &lkey, flag);
lkey.name[lkey.namlen] = 0;
if ((flag & JFS_OS2) == JFS_OS2)
ciToUpper(&lkey);
dtGetKey(rp, ri, &rkey, flag);
rkey.name[rkey.namlen] = 0;
if ((flag & JFS_OS2) == JFS_OS2)
ciToUpper(&rkey);
/* compute prefix */
klen = 0;
kname = key->name;
namlen = min(lkey.namlen, rkey.namlen);
for (pl = lkey.name, pr = rkey.name;
namlen; pl++, pr++, namlen--, klen++, kname++) {
*kname = *pr;
if (*pl != *pr) {
key->namlen = klen + 1;
goto free_names;
}
}
/* l->namlen <= r->namlen since l <= r */
if (lkey.namlen < rkey.namlen) {
*kname = *pr;
key->namlen = klen + 1;
} else /* l->namelen == r->namelen */
key->namlen = klen;
free_names:
kfree(lkey.name);
kfree(rkey.name);
return 0;
}
/*
* dtGetKey()
*
* function: get key of the entry
*/
static void dtGetKey(dtpage_t * p, int i, /* entry index */
struct component_name * key, int flag)
{
int si;
s8 *stbl;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int namlen, len;
wchar_t *kname;
__le16 *name;
/* get entry */
stbl = DT_GETSTBL(p);
si = stbl[i];
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
namlen = lh->namlen;
name = lh->name;
if (flag & JFS_DIR_INDEX)
len = min(namlen, DTLHDRDATALEN);
else
len = min(namlen, DTLHDRDATALEN_LEGACY);
} else {
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
namlen = ih->namlen;
name = ih->name;
len = min(namlen, DTIHDRDATALEN);
}
key->namlen = namlen;
kname = key->name;
/*
* move head/only segment
*/
UniStrncpy_from_le(kname, name, len);
/*
* move additional segment(s)
*/
while (si >= 0) {
/* get next segment */
t = &p->slot[si];
kname += len;
namlen -= len;
len = min(namlen, DTSLOTDATALEN);
UniStrncpy_from_le(kname, t->name, len);
si = t->next;
}
}
/*
* dtInsertEntry()
*
* function: allocate free slot(s) and
* write a leaf/internal entry
*
* return: entry slot index
*/
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock ** dtlock)
{
struct dtslot *h, *t;
struct ldtentry *lh = NULL;
struct idtentry *ih = NULL;
int hsi, fsi, klen, len, nextindex;
wchar_t *kname;
__le16 *name;
s8 *stbl;
pxd_t *xd;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
s64 bn = 0;
struct metapage *mp = NULL;
klen = key->namlen;
kname = key->name;
/* allocate a free slot */
hsi = fsi = p->header.freelist;
h = &p->slot[fsi];
p->header.freelist = h->next;
--p->header.freecnt;
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = hsi;
/* write head/only segment */
if (p->header.flag & BT_LEAF) {
lh = (struct ldtentry *) h;
lh->next = h->next;
lh->inumber = cpu_to_le32(data->leaf.ino);
lh->namlen = klen;
name = lh->name;
if (data->leaf.ip) {
len = min(klen, DTLHDRDATALEN);
if (!(p->header.flag & BT_ROOT))
bn = addressPXD(&p->header.self);
lh->index = cpu_to_le32(add_index(data->leaf.tid,
data->leaf.ip,
bn, index));
} else
len = min(klen, DTLHDRDATALEN_LEGACY);
} else {
ih = (struct idtentry *) h;
ih->next = h->next;
xd = (pxd_t *) ih;
*xd = data->xd;
ih->namlen = klen;
name = ih->name;
len = min(klen, DTIHDRDATALEN);
}
UniStrncpy_to_le(name, kname, len);
n = 1;
xsi = hsi;
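/* xsi tracks the last slot written; a new linelock run is opened
* below whenever the next free slot is not contiguous with it
*/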
/* write additional segment(s) */
t = h;
klen -= len;
while (klen) {
/* get free slot */
fsi = p->header.freelist;
t = &p->slot[fsi];
p->header.freelist = t->next;
--p->header.freecnt;
/* is next slot contiguous ? */
if (fsi != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = fsi;
n = 0;
}
kname += len;
len = min(klen, DTSLOTDATALEN);
UniStrncpy_to_le(t->name, kname, len);
n++;
xsi = fsi;
klen -= len;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* terminate last/only segment */
if (h == t) {
/* single segment entry */
if (p->header.flag & BT_LEAF)
lh->next = -1;
else
ih->next = -1;
} else
/* multi-segment entry */
t->next = -1;
/* if insert into middle, shift right succeeding entries in stbl */
stbl = DT_GETSTBL(p);
nextindex = p->header.nextindex;
if (index < nextindex) {
memmove(stbl + index + 1, stbl + index, nextindex - index);
if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
s64 lblock;
/*
* Need to update slot number for entries that moved
* in the stbl
*/
mp = NULL;
for (n = index + 1; n <= nextindex; n++) {
lh = (struct ldtentry *) & (p->slot[stbl[n]]);
modify_index(data->leaf.tid, data->leaf.ip,
le32_to_cpu(lh->index), bn, n,
&mp, &lblock);
}
if (mp)
release_metapage(mp);
}
}
stbl[index] = hsi;
/* advance next available entry index of stbl */
++p->header.nextindex;
}
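/*
 * Illustrative note (added for clarity, not in the original source):
 * directory names longer than the head segment are stored as a chain of
 * slots. dtInsertEntry() above writes the first DTLHDRDATALEN (or
 * DTLHDRDATALEN_LEGACY / DTIHDRDATALEN) UniChars into the head slot and
 * spills the remainder into further slots of DTSLOTDATALEN UniChars each,
 * linked through the slots' next fields; dtGetKey() walks the same chain
 * to reassemble the name.
 */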
/*
* dtMoveEntry()
*
* function: move entries from split/left page to new/right page
*
* nextindex of dst page and freelist/freecnt of both pages
* are updated.
*/
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index)
{
int ssi, next; /* src slot index */
int di; /* dst entry index */
int dsi; /* dst slot index */
s8 *sstbl, *dstbl; /* sorted entry table */
int snamlen, len;
struct ldtentry *slh, *dlh = NULL;
struct idtentry *sih, *dih = NULL;
struct dtslot *h, *s, *d;
struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
struct lv *slv, *dlv;
int xssi, ns, nd;
int sfsi;
sstbl = (s8 *) & sp->slot[sp->header.stblindex];
dstbl = (s8 *) & dp->slot[dp->header.stblindex];
dsi = dp->header.freelist; /* first (whole page) free slot */
sfsi = sp->header.freelist;
/* linelock destination entry slot */
dlv = & ddtlck->lv[ddtlck->index];
dlv->offset = dsi;
/* linelock source entry slot */
slv = & sdtlck->lv[sdtlck->index];
slv->offset = sstbl[si];
xssi = slv->offset - 1;
/*
* move entries
*/
ns = nd = 0;
for (di = 0; si < sp->header.nextindex; si++, di++) {
ssi = sstbl[si];
dstbl[di] = dsi;
/* is next slot contiguous ? */
if (ssi != xssi + 1) {
/* close current linelock */
slv->length = ns;
sdtlck->index++;
/* open new linelock */
if (sdtlck->index < sdtlck->maxcnt)
slv++;
else {
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
ns = 0;
}
/*
* move head/only segment of an entry
*/
/* get dst slot */
h = d = &dp->slot[dsi];
/* get src slot and move */
s = &sp->slot[ssi];
if (sp->header.flag & BT_LEAF) {
/* get source entry */
slh = (struct ldtentry *) s;
dlh = (struct ldtentry *) h;
snamlen = slh->namlen;
if (do_index) {
len = min(snamlen, DTLHDRDATALEN);
dlh->index = slh->index; /* little-endian */
} else
len = min(snamlen, DTLHDRDATALEN_LEGACY);
memcpy(dlh, slh, 6 + len * 2);
next = slh->next;
/* update dst head/only segment next field */
dsi++;
dlh->next = dsi;
} else {
sih = (struct idtentry *) s;
snamlen = sih->namlen;
len = min(snamlen, DTIHDRDATALEN);
dih = (struct idtentry *) h;
memcpy(dih, sih, 10 + len * 2);
next = sih->next;
dsi++;
dih->next = dsi;
}
/* free src head/only segment */
s->next = sfsi;
s->cnt = 1;
sfsi = ssi;
ns++;
nd++;
xssi = ssi;
/*
* move additional segment(s) of the entry
*/
snamlen -= len;
while ((ssi = next) >= 0) {
/* is next slot contiguous ? */
if (ssi != xssi + 1) {
/* close current linelock */
slv->length = ns;
sdtlck->index++;
/* open new linelock */
if (sdtlck->index < sdtlck->maxcnt)
slv++;
else {
sdtlck =
(struct dt_lock *)
txLinelock(sdtlck);
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
ns = 0;
}
/* get next source segment */
s = &sp->slot[ssi];
/* get next destination free slot */
d++;
len = min(snamlen, DTSLOTDATALEN);
UniStrncpy_le(d->name, s->name, len);
ns++;
nd++;
xssi = ssi;
dsi++;
d->next = dsi;
/* free source segment */
next = s->next;
s->next = sfsi;
s->cnt = 1;
sfsi = ssi;
snamlen -= len;
} /* end while */
/* terminate dst last/only segment */
if (h == d) {
/* single segment entry */
if (dp->header.flag & BT_LEAF)
dlh->next = -1;
else
dih->next = -1;
} else
/* multi-segment entry */
d->next = -1;
} /* end for */
/* close current linelock */
slv->length = ns;
sdtlck->index++;
*sdtlock = sdtlck;
dlv->length = nd;
ddtlck->index++;
*ddtlock = ddtlck;
/* update source header */
sp->header.freelist = sfsi;
sp->header.freecnt += nd;
/* update destination header */
dp->header.nextindex = di;
dp->header.freelist = dsi;
dp->header.freecnt -= nd;
}
/*
* dtDeleteEntry()
*
* function: free a (leaf/internal) entry
*
* log freelist header, stbl, and each segment slot of entry
* (even though last/only segment next field is modified,
* physical image logging requires all segment slots of
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
s8 *stbl;
struct dtslot *t;
int si, freecnt;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
stbl = DT_GETSTBL(p);
fsi = stbl[fi];
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
/* get the head/only segment */
t = &p->slot[fsi];
if (p->header.flag & BT_LEAF)
si = ((struct ldtentry *) t)->next;
else
si = ((struct idtentry *) t)->next;
t->next = si;
t->cnt = 1;
n = freecnt = 1;
xsi = fsi;
/* find the last/only segment */
while (si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
freecnt++;
t = &p->slot[si];
t->cnt = 1;
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* update freelist */
t->next = p->header.freelist;
p->header.freelist = fsi;
p->header.freecnt += freecnt;
/* if delete from middle,
* shift left the succeeding entries in the stbl
*/
si = p->header.nextindex;
if (fi < si - 1)
memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1);
p->header.nextindex--;
}
/*
* dtTruncateEntry()
*
* function: truncate a (leaf/internal) entry
*
* log freelist header, stbl, and each segment slot of entry
* (even though last/only segment next field is modified,
* physical image logging requires all segment slots of
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
{
int tsi; /* truncate entry slot index */
s8 *stbl;
struct dtslot *t;
int si, freecnt;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int fsi, xsi, n;
/* get slot index of the entry to truncate */
stbl = DT_GETSTBL(p);
tsi = stbl[ti];
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = tsi;
/* get the head/only segment */
t = &p->slot[tsi];
ASSERT(p->header.flag & BT_INTERNAL);
((struct idtentry *) t)->namlen = 0;
si = ((struct idtentry *) t)->next;
((struct idtentry *) t)->next = -1;
n = 1;
freecnt = 0;
fsi = si;
xsi = tsi;
/* find the last/only segment */
while (si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
freecnt++;
t = &p->slot[si];
t->cnt = 1;
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
/* update freelist */
if (freecnt == 0)
return;
t->next = p->header.freelist;
p->header.freelist = fsi;
p->header.freecnt += freecnt;
}
/*
* dtLinelockFreelist()
*/
static void dtLinelockFreelist(dtpage_t * p, /* directory page */
int m, /* max slot index */
struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
struct dtslot *t;
int si;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
fsi = p->header.freelist;
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
n = 1;
xsi = fsi;
t = &p->slot[fsi];
si = t->next;
/* find the last/only segment */
while (si < m && si >= 0) {
/* is next slot contiguous ? */
if (si != xsi + 1) {
/* close current linelock */
lv->length = n;
dtlck->index++;
/* open new linelock */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
n = 0;
}
n++;
xsi = si;
t = &p->slot[si];
si = t->next;
}
/* close current linelock */
lv->length = n;
dtlck->index++;
*dtlock = dtlck;
}
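/*
 * Illustrative note (added for clarity, not in the original source): the
 * "close current linelock / open new linelock" pattern used throughout this
 * file coalesces runs of contiguous slots into a single lv entry. For
 * example, logging slots {5, 6, 7, 10, 11} produces two lv entries,
 * {offset 5, length 3} and {offset 10, length 2}; a further dt_lock is
 * chained via txLinelock() only when the current lv array is full.
 */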
/*
* NAME: dtModify
*
* FUNCTION: Modify the inode number part of a directory entry
*
* PARAMETERS:
* tid - Transaction id
* ip - Inode of parent directory
* key - Name of entry to be modified
* orig_ino - Original inode number expected in entry
* new_ino - New inode number to put into entry
* flag - JFS_RENAME
*
* RETURNS:
* -ESTALE - If entry found does not match orig_ino passed in
* -ENOENT - If no entry can be found to match key
* 0 - If successfully modified entry
*/
int dtModify(tid_t tid, struct inode *ip,
struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
{
int rc;
s64 bn;
struct metapage *mp;
dtpage_t *p;
int index;
struct btstack btstack;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
s8 *stbl;
int entry_si; /* entry slot index */
struct ldtentry *entry;
/*
* search for the entry to modify:
*
* dtSearch() returns (leaf page pinned, index at which to modify).
*/
if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag)))
return rc;
/* retrieve search result */
DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
BT_MARK_DIRTY(mp, ip);
/*
* acquire a transaction lock on the leaf page of named entry
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) & tlck->lock;
/* get slot index of the entry */
stbl = DT_GETSTBL(p);
entry_si = stbl[index];
/* linelock entry */
ASSERT(dtlck->index == 0);
lv = & dtlck->lv[0];
lv->offset = entry_si;
lv->length = 1;
dtlck->index++;
/* get the head/only segment */
entry = (struct ldtentry *) & p->slot[entry_si];
/* substitute the inode number of the entry */
entry->inumber = cpu_to_le32(new_ino);
/* unpin the leaf page */
DT_PUTPAGE(mp);
return 0;
}
| gpl-2.0 |
wanam/Adam-Kernel-Note4-N910C | fs/squashfs/symlink.c | 9209 | 3870 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* symlink.c
*/
/*
* This file implements code to handle symbolic links.
*
* The data contents of symbolic links are stored inside the symbolic
* link inode within the inode table. This allows the normally small symbolic
* link to be compressed as part of the inode table, achieving much greater
* compression than if the symbolic link was compressed individually.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "xattr.h"
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
int index = page->index << PAGE_CACHE_SHIFT;
u64 block = squashfs_i(inode)->start;
int offset = squashfs_i(inode)->offset;
int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
int bytes, copied;
void *pageaddr;
struct squashfs_cache_entry *entry;
TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
"%llx, offset %x\n", page->index, block, offset);
/*
* Skip index bytes into symlink metadata.
*/
if (index) {
bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
index);
if (bytes < 0) {
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
goto error_out;
}
}
/*
* Read length bytes from symlink metadata. squashfs_read_metadata()
* is not used here because it can sleep and we want to use
* kmap_atomic to map the page. Instead call the underlying
* squashfs_cache_get routine. As length bytes may overlap metadata
* blocks, we may need to call squashfs_cache_get multiple times.
*/
for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
if (entry->error) {
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
squashfs_cache_put(entry);
goto error_out;
}
pageaddr = kmap_atomic(page);
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
else
block = entry->next_index;
kunmap_atomic(pageaddr);
squashfs_cache_put(entry);
}
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
return 0;
error_out:
SetPageError(page);
unlock_page(page);
return 0;
}
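/*
 * Illustrative note (added for clarity, not in the original source): when a
 * symlink target straddles two metadata blocks, the loop above copies what
 * the current block holds (copied < length - bytes), advances block to
 * entry->next_index, resets offset to 0 on the next iteration, and repeats
 * until length bytes have been gathered into the page.
 */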
const struct address_space_operations squashfs_symlink_aops = {
.readpage = squashfs_symlink_readpage
};
const struct inode_operations squashfs_symlink_inode_ops = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getxattr = generic_getxattr,
.listxattr = squashfs_listxattr
};
| gpl-2.0 |
Callie-Cacophony/CAF-test-kernel | drivers/media/rc/keymaps/rc-technisat-usb2.c | 9465 | 2794 | /* rc-technisat-usb2.c - Keytable for SkyStar HD USB
*
* Copyright (C) 2010 Patrick Boettcher,
* Kernel Labs Inc. PO Box 745, St James, NY 11780
*
* Development was sponsored by Technisat Digital UK Limited, whose
* registered office is Witan Gate House 500 - 600 Witan Gate West,
* Milton Keynes, MK9 1SH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND
* TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO
* THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR
* FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER
* NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL,
* DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the
* GNU General Public License for more details.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table technisat_usb2[] = {
{0x0a0c, KEY_POWER},
{0x0a01, KEY_1},
{0x0a02, KEY_2},
{0x0a03, KEY_3},
{0x0a0d, KEY_MUTE},
{0x0a04, KEY_4},
{0x0a05, KEY_5},
{0x0a06, KEY_6},
{0x0a38, KEY_VIDEO}, /* EXT */
{0x0a07, KEY_7},
{0x0a08, KEY_8},
{0x0a09, KEY_9},
{0x0a00, KEY_0},
{0x0a4f, KEY_INFO},
{0x0a20, KEY_CHANNELUP},
{0x0a52, KEY_MENU},
{0x0a11, KEY_VOLUMEUP},
{0x0a57, KEY_OK},
{0x0a10, KEY_VOLUMEDOWN},
{0x0a2f, KEY_EPG},
{0x0a21, KEY_CHANNELDOWN},
{0x0a22, KEY_REFRESH},
{0x0a3c, KEY_TEXT},
{0x0a76, KEY_ENTER}, /* HOOK */
{0x0a0f, KEY_HELP},
{0x0a6b, KEY_RED},
{0x0a6c, KEY_GREEN},
{0x0a6d, KEY_YELLOW},
{0x0a6e, KEY_BLUE},
{0x0a29, KEY_STOP},
{0x0a23, KEY_LANGUAGE},
{0x0a53, KEY_TV},
{0x0a0a, KEY_PROGRAM},
};
static struct rc_map_list technisat_usb2_map = {
.map = {
.scan = technisat_usb2,
.size = ARRAY_SIZE(technisat_usb2),
.rc_type = RC_TYPE_RC5,
.name = RC_MAP_TECHNISAT_USB2,
}
};
static int __init init_rc_map(void)
{
return rc_map_register(&technisat_usb2_map);
}
static void __exit exit_rc_map(void)
{
rc_map_unregister(&technisat_usb2_map);
}
module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
androidrbox/android_kernel_amazon_bueller | arch/x86/kernel/crash_dump_64.c | 12537 | 1327 | /*
* Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
if (userbuf) {
if (copy_to_user(buf, vaddr + offset, csize)) {
iounmap(vaddr);
return -EFAULT;
}
} else
memcpy(buf, vaddr + offset, csize);
set_iounmap_nonlazy();
iounmap(vaddr);
return csize;
}
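/*
 * Illustrative sketch (added for clarity, not part of this file): a
 * hypothetical caller that copies one "oldmem" page into a kernel buffer.
 * The example_* name is made up; only copy_oldmem_page() comes from above.
 */
#if 0
static ssize_t example_read_dump_page(unsigned long pfn, char *kbuf)
{
/* userbuf == 0: kbuf is a kernel buffer, so the memcpy() path is taken */
return copy_oldmem_page(pfn, kbuf, PAGE_SIZE, 0, 0);
}
#endif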
| gpl-2.0 |
defconoi/L-Kernel-Mako | arch/x86/kernel/crash_dump_64.c | 12537 | 1327 | /*
* Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
if (userbuf) {
if (copy_to_user(buf, vaddr + offset, csize)) {
iounmap(vaddr);
return -EFAULT;
}
} else
memcpy(buf, vaddr + offset, csize);
set_iounmap_nonlazy();
iounmap(vaddr);
return csize;
}
| gpl-2.0 |
Haxynox/kernel_samsung_n7100-old | drivers/media/video/exynos/gsc/coef.c | 506 | 10262 | /* linux/drivers/media/video/exynos/gsc/coef.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* G-scaler poly-phase filter coefficients
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 2 of the License,
* or (at your option) any later version.
*/
#include <linux/types.h>
#include "gsc-core.h"
/* 8-tap Filter Coefficient */
const int h_coef_8t[7][16][8] = {
{ /* Ratio <= 65536 (~8:8) */
{ 0, 0, 0, 128, 0, 0, 0, 0 },/* 0 */
{ -1, 2, -6, 127, 7, -2, 1, 0 },/* 1 */
{ -1, 4, -12, 125, 16, -5, 1, 0 },/* 2 */
{ -1, 5, -15, 120, 25, -8, 2, 0 },/* 3 */
{ -1, 6, -18, 114, 35, -10, 3, -1 },/* 4 */
{ -1, 6, -20, 107, 46, -13, 4, -1 },/* 5 */
{ -2, 7, -21, 99, 57, -16, 5, -1 },/* 6 */
{ -1, 6, -20, 89, 68, -18, 5, -1 },/* 7 */
{ -1, 6, -20, 79, 79, -20, 6, -1 },/* 8 */
{ -1, 5, -18, 68, 89, -20, 6, -1 },/* 9 */
{ -1, 5, -16, 57, 99, -21, 7, -2 },/* 10 */
{ -1, 4, -13, 46, 107, -20, 6, -1 },/* 11 */
{ -1, 3, -10, 35, 114, -18, 6, -1 },/* 12 */
{ 0, 2, -8, 25, 120, -15, 5, -1 },/* 13 */
{ 0, 1, -5, 16, 125, -12, 4, -1 },/* 14 */
{ 0, 1, -2, 7, 127, -6, 2, -1 } /* 15 */
},
{ /* 65536 < Ratio <= 74898 (~8:7) */
{ 3, -8, 14, 111, 13, -8, 3, 0 },/* 0 */
{ 2, -6, 7, 112, 21, -10, 3, -1 },/* 1 */
{ 2, -4, 1, 110, 28, -12, 4, -1 },/* 2 */
{ 1, -2, -3, 106, 36, -13, 4, -1 },/* 3 */
{ 1, -1, -7, 103, 44, -15, 4, -1 },/* 4 */
{ 1, 1, -11, 97, 53, -16, 4, -1 },/* 5 */
{ 0, 2, -13, 91, 61, -16, 4, -1 },/* 6 */
{ 0, 3, -15, 85, 69, -17, 4, -1 },/* 7 */
{ 0, 3, -16, 77, 77, -16, 3, 0 },/* 8 */
{ -1, 4, -17, 69, 85, -15, 3, 0 },/* 9 */
{ -1, 4, -16, 61, 91, -13, 2, 0 },/* 10 */
{ -1, 4, -16, 53, 97, -11, 1, 1 },/* 11 */
{ -1, 4, -15, 44, 103, -7, -1, 1 },/* 12 */
{ -1, 4, -13, 36, 106, -3, -2, 1 },/* 13 */
{ -1, 4, -12, 28, 110, 1, -4, 2 },/* 14 */
{ -1, 3, -10, 21, 112, 7, -6, 2 } /* 15 */
},
{ /* 74898 < Ratio <= 87381 (~8:6) */
{ 2, -11, 25, 96, 25, -11, 2, 0 },/* 0 */
{ 2, -10, 19, 96, 31, -12, 2, 0 },/* 1 */
{ 2, -9, 14, 94, 37, -12, 2, 0 },/* 2 */
{ 2, -8, 10, 92, 43, -12, 1, 0 },/* 3 */
{ 2, -7, 5, 90, 49, -12, 1, 0 },/* 4 */
{ 2, -5, 1, 86, 55, -12, 0, 1 },/* 5 */
{ 2, -4, -2, 82, 61, -11, -1, 1 },/* 6 */
{ 1, -3, -5, 77, 67, -9, -1, 1 },/* 7 */
{ 1, -2, -7, 72, 72, -7, -2, 1 },/* 8 */
{ 1, -1, -9, 67, 77, -5, -3, 1 },/* 9 */
{ 1, -1, -11, 61, 82, -2, -4, 2 },/* 10 */
{ 1, 0, -12, 55, 86, 1, -5, 2 },/* 11 */
{ 0, 1, -12, 49, 90, 5, -7, 2 },/* 12 */
{ 0, 1, -12, 43, 92, 10, -8, 2 },/* 13 */
{ 0, 2, -12, 37, 94, 14, -9, 2 },/* 14 */
{ 0, 2, -12, 31, 96, 19, -10, 2 } /* 15 */
},
{ /* 87381 < Ratio <= 104857 (~8:5) */
{ -1, -8, 33, 80, 33, -8, -1, 0 },/* 0 */
{ -1, -8, 28, 80, 37, -7, -2, 1 },/* 1 */
{ 0, -8, 24, 79, 41, -7, -2, 1 },/* 2 */
{ 0, -8, 20, 78, 46, -6, -3, 1 },/* 3 */
{ 0, -8, 16, 76, 50, -4, -3, 1 },/* 4 */
{ 0, -7, 13, 74, 54, -3, -4, 1 },/* 5 */
{ 1, -7, 10, 71, 58, -1, -5, 1 },/* 6 */
{ 1, -6, 6, 68, 62, 1, -5, 1 },/* 7 */
{ 1, -6, 4, 65, 65, 4, -6, 1 },/* 8 */
{ 1, -5, 1, 62, 68, 6, -6, 1 },/* 9 */
{ 1, -5, -1, 58, 71, 10, -7, 1 },/* 10 */
{ 1, -4, -3, 54, 74, 13, -7, 0 },/* 11 */
{ 1, -3, -4, 50, 76, 16, -8, 0 },/* 12 */
{ 1, -3, -6, 46, 78, 20, -8, 0 },/* 13 */
{ 1, -2, -7, 41, 79, 24, -8, 0 },/* 14 */
{ 1, -2, -7, 37, 80, 28, -8, -1 } /* 15 */
},
{ /* 104857 < Ratio <= 131072 (~8:4) */
{ -3, 0, 35, 64, 35, 0, -3, 0 },/* 0 */
{ -3, -1, 32, 64, 38, 1, -3, 0 },/* 1 */
{ -2, -2, 29, 63, 41, 2, -3, 0 },/* 2 */
{ -2, -3, 27, 63, 43, 4, -4, 0 },/* 3 */
{ -2, -3, 24, 61, 46, 6, -4, 0 },/* 4 */
{ -2, -3, 21, 60, 49, 7, -4, 0 },/* 5 */
{ -1, -4, 19, 59, 51, 9, -4, -1 },/* 6 */
{ -1, -4, 16, 57, 53, 12, -4, -1 },/* 7 */
{ -1, -4, 14, 55, 55, 14, -4, -1 },/* 8 */
{ -1, -4, 12, 53, 57, 16, -4, -1 },/* 9 */
{ -1, -4, 9, 51, 59, 19, -4, -1 },/* 10 */
{ 0, -4, 7, 49, 60, 21, -3, -2 },/* 11 */
{ 0, -4, 6, 46, 61, 24, -3, -2 },/* 12 */
{ 0, -4, 4, 43, 63, 27, -3, -2 },/* 13 */
{ 0, -3, 2, 41, 63, 29, -2, -2 },/* 14 */
{ 0, -3, 1, 38, 64, 32, -1, -3 } /* 15 */
},
{ /* 131072 < Ratio <= 174762 (~8:3) */
{ -1, 8, 33, 48, 33, 8, -1, 0 },/* 0 */
{ -1, 7, 31, 49, 35, 9, -1, -1 },/* 1 */
{ -1, 6, 30, 49, 36, 10, -1, -1 },/* 2 */
{ -1, 5, 28, 48, 38, 12, -1, -1 },/* 3 */
{ -1, 4, 26, 48, 39, 13, 0, -1 },/* 4 */
{ -1, 3, 24, 47, 41, 15, 0, -1 },/* 5 */
{ -1, 2, 23, 47, 42, 16, 0, -1 },/* 6 */
{ -1, 2, 21, 45, 43, 18, 1, -1 },/* 7 */
{ -1, 1, 19, 45, 45, 19, 1, -1 },/* 8 */
{ -1, 1, 18, 43, 45, 21, 2, -1 },/* 9 */
{ -1, 0, 16, 42, 47, 23, 2, -1 },/* 10 */
{ -1, 0, 15, 41, 47, 24, 3, -1 },/* 11 */
{ -1, 0, 13, 39, 48, 26, 4, -1 },/* 12 */
{ -1, -1, 12, 38, 48, 28, 5, -1 },/* 13 */
{ -1, -1, 10, 36, 49, 30, 6, -1 },/* 14 */
{ -1, -1, 9, 35, 49, 31, 7, -1 } /* 15 */
},
{ /* 174762 < Ratio <= 262144 (~8:2) */
{ 2, 13, 30, 38, 30, 13, 2, 0 },/* 0 */
{ 2, 12, 29, 38, 30, 14, 3, 0 },/* 1 */
{ 2, 11, 28, 38, 31, 15, 3, 0 },/* 2 */
{ 2, 10, 26, 38, 32, 16, 4, 0 },/* 3 */
{ 1, 10, 26, 37, 33, 17, 4, 0 },/* 4 */
{ 1, 9, 24, 37, 34, 18, 5, 0 },/* 5 */
{ 1, 8, 24, 37, 34, 19, 5, 0 },/* 6 */
{ 1, 7, 22, 36, 35, 20, 6, 1 },/* 7 */
{ 1, 6, 21, 36, 36, 21, 6, 1 },/* 8 */
{ 1, 6, 20, 35, 36, 22, 7, 1 },/* 9 */
{ 0, 5, 19, 34, 37, 24, 8, 1 },/* 10 */
{ 0, 5, 18, 34, 37, 24, 9, 1 },/* 11 */
{ 0, 4, 17, 33, 37, 26, 10, 1 },/* 12 */
{ 0, 4, 16, 32, 38, 26, 10, 2 },/* 13 */
{ 0, 3, 15, 31, 38, 28, 11, 2 },/* 14 */
{ 0, 3, 14, 30, 38, 29, 12, 2 } /* 15 */
}
};
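/*
 * Illustrative note (added for clarity, not in the original source): the
 * "Ratio" bounds in the comments above are source/destination scaling
 * ratios in 16.16 fixed point, so 65536 is 1:1, 87381 ~= 65536 * 8 / 6
 * (~8:6 downscaling) and 262144 is a 4x (~8:2) downscale; each 16-row
 * sub-table holds the 8-tap coefficients for the 16 filter phases of that
 * band.
 */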
/* 4-tap Filter Coefficient */
const int v_coef_4t[7][16][4] = {
{ /* Ratio <= 65536 (~8:8) */
{ 0, 128, 0, 0 },/* 0 */
{ -4, 127, 5, 0 },/* 1 */
{ -6, 124, 11, -1 },/* 2 */
{ -8, 118, 19, -1 },/* 3 */
{ -8, 111, 27, -2 },/* 4 */
{ -8, 102, 37, -3 },/* 5 */
{ -8, 92, 48, -4 },/* 6 */
{ -7, 81, 59, -5 },/* 7 */
{ -6, 70, 70, -6 },/* 8 */
{ -5, 59, 81, -7 },/* 9 */
{ -4, 48, 92, -8 },/* 10 */
{ -3, 37, 102, -8 },/* 11 */
{ -2, 27, 111, -8 },/* 12 */
{ -1, 19, 118, -8 },/* 13 */
{ -1, 11, 124, -6 },/* 14 */
{ 0, 5, 127, -4 } /* 15 */
},
{ /* 65536 < Ratio <= 74898 (~8:7) */
{ 8, 112, 8, 0 },/* 0 */
{ 4, 111, 14, -1 },/* 1 */
{ 1, 109, 20, -2 },/* 2 */
{ -2, 105, 27, -2 },/* 3 */
{ -3, 100, 34, -3 },/* 4 */
{ -5, 93, 43, -3 },/* 5 */
{ -5, 86, 51, -4 },/* 6 */
{ -5, 77, 60, -4 },/* 7 */
{ -5, 69, 69, -5 },/* 8 */
{ -4, 60, 77, -5 },/* 9 */
{ -4, 51, 86, -5 },/* 10 */
{ -3, 43, 93, -5 },/* 11 */
{ -3, 34, 100, -3 },/* 12 */
{ -2, 27, 105, -2 },/* 13 */
{ -2, 20, 109, 1 },/* 14 */
{ -1, 14, 111, 4 } /* 15 */
},
{ /* 74898 < Ratio <= 87381 (~8:6) */
{ 16, 96, 16, 0 },/* 0 */
{ 12, 97, 21, -2 },/* 1 */
{ 8, 96, 26, -2 },/* 2 */
{ 5, 93, 32, -2 },/* 3 */
{ 2, 89, 39, -2 },/* 4 */
{ 0, 84, 46, -2 },/* 5 */
{ -1, 79, 53, -3 },/* 6 */
{ -2, 73, 59, -2 },/* 7 */
{ -2, 66, 66, -2 },/* 8 */
{ -2, 59, 73, -2 },/* 9 */
{ -3, 53, 79, -1 },/* 10 */
{ -2, 46, 84, 0 },/* 11 */
{ -2, 39, 89, 2 },/* 12 */
{ -2, 32, 93, 5 },/* 13 */
{ -2, 26, 96, 8 },/* 14 */
{ -2, 21, 97, 12 } /* 15 */
},
{ /* 87381 < Ratio <= 104857 (~8:5) */
{ 22, 84, 22, 0 },/* 0 */
{ 18, 85, 26, -1 },/* 1 */
{ 14, 84, 31, -1 },/* 2 */
{ 11, 82, 36, -1 },/* 3 */
{ 8, 79, 42, -1 },/* 4 */
{ 6, 76, 47, -1 },/* 5 */
{ 4, 72, 52, 0 },/* 6 */
{ 2, 68, 58, 0 },/* 7 */
{ 1, 63, 63, 1 },/* 8 */
{ 0, 58, 68, 2 },/* 9 */
{ 0, 52, 72, 4 },/* 10 */
{ -1, 47, 76, 6 },/* 11 */
{ -1, 42, 79, 8 },/* 12 */
{ -1, 36, 82, 11 },/* 13 */
{ -1, 31, 84, 14 },/* 14 */
{ -1, 26, 85, 18 } /* 15 */
},
{ /* 104857 < Ratio <= 131072 (~8:4) */
{ 26, 76, 26, 0 },/* 0 */
{ 22, 76, 30, 0 },/* 1 */
{ 19, 75, 34, 0 },/* 2 */
{ 16, 73, 38, 1 },/* 3 */
{ 13, 71, 43, 1 },/* 4 */
{ 10, 69, 47, 2 },/* 5 */
{ 8, 66, 51, 3 },/* 6 */
{ 6, 63, 55, 4 },/* 7 */
{ 5, 59, 59, 5 },/* 8 */
{ 4, 55, 63, 6 },/* 9 */
{ 3, 51, 66, 8 },/* 10 */
{ 2, 47, 69, 10 },/* 11 */
{ 1, 43, 71, 13 },/* 12 */
{ 1, 38, 73, 16 },/* 13 */
{ 0, 34, 75, 19 },/* 14 */
{ 0, 30, 76, 22 } /* 15 */
},
{ /* 131072 < Ratio <= 174762 (~8:3) */
{ 29, 70, 29, 0 },/* 0 */
{ 26, 68, 32, 2 },/* 1 */
{ 23, 67, 36, 2 },/* 2 */
{ 20, 66, 39, 3 },/* 3 */
{ 17, 65, 43, 3 },/* 4 */
{ 15, 63, 46, 4 },/* 5 */
{ 12, 61, 50, 5 },/* 6 */
{ 10, 58, 53, 7 },/* 7 */
{ 8, 56, 56, 8 },/* 8 */
{ 7, 53, 58, 10 },/* 9 */
{ 5, 50, 61, 12 },/* 10 */
{ 4, 46, 63, 15 },/* 11 */
{ 3, 43, 65, 17 },/* 12 */
{ 3, 39, 66, 20 },/* 13 */
{ 2, 36, 67, 23 },/* 14 */
{ 2, 32, 68, 26 } /* 15 */
},
{ /* 174762 < Ratio <= 262144 (~8:2) */
{ 32, 64, 32, 0 },/* 0 */
{ 28, 63, 34, 3 },/* 1 */
{ 25, 62, 37, 4 },/* 2 */
{ 22, 62, 40, 4 },/* 3 */
{ 19, 61, 43, 5 },/* 4 */
{ 17, 59, 46, 6 },/* 5 */
{ 15, 58, 48, 7 },/* 6 */
{ 13, 55, 51, 9 },/* 7 */
{ 11, 53, 53, 11 },/* 8 */
{ 9, 51, 55, 13 },/* 9 */
{ 7, 48, 58, 15 },/* 10 */
{ 6, 46, 59, 17 },/* 11 */
{ 5, 43, 61, 19 },/* 12 */
{ 4, 40, 62, 22 },/* 13 */
{ 4, 37, 62, 25 },/* 14 */
{ 3, 34, 63, 28 } /* 15 */
}
};
| gpl-2.0 |
Cpasjuste/android_kernel_lg_p990 | sound/pcmcia/pdaudiocf/pdaudiocf.c | 506 | 7665 | /*
* Driver for Sound Core PDAudioCF soundcard
*
* Copyright (c) 2003 by Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <sound/core.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/cisreg.h>
#include "pdaudiocf.h"
#include <sound/initval.h>
#include <linux/init.h>
/*
*/
#define CARD_NAME "PDAudio-CF"
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Sound Core " CARD_NAME);
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Sound Core," CARD_NAME "}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
/*
*/
static struct snd_card *card_list[SNDRV_CARDS];
/*
* prototypes
*/
static int pdacf_config(struct pcmcia_device *link);
static void snd_pdacf_detach(struct pcmcia_device *p_dev);
static void pdacf_release(struct pcmcia_device *link)
{
pcmcia_disable_device(link);
}
/*
* destructor
*/
static int snd_pdacf_free(struct snd_pdacf *pdacf)
{
struct pcmcia_device *link = pdacf->p_dev;
pdacf_release(link);
card_list[pdacf->index] = NULL;
pdacf->card = NULL;
kfree(pdacf);
return 0;
}
static int snd_pdacf_dev_free(struct snd_device *device)
{
struct snd_pdacf *chip = device->device_data;
return snd_pdacf_free(chip);
}
/*
* snd_pdacf_attach - attach callback for cs
*/
static int snd_pdacf_probe(struct pcmcia_device *link)
{
int i, err;
struct snd_pdacf *pdacf;
struct snd_card *card;
static struct snd_device_ops ops = {
.dev_free = snd_pdacf_dev_free,
};
snd_printdd(KERN_DEBUG "pdacf_attach called\n");
/* find an empty slot from the card list */
for (i = 0; i < SNDRV_CARDS; i++) {
if (! card_list[i])
break;
}
if (i >= SNDRV_CARDS) {
snd_printk(KERN_ERR "pdacf: too many cards found\n");
return -EINVAL;
}
if (! enable[i])
return -ENODEV; /* disabled explicitly */
/* ok, create a card instance */
err = snd_card_create(index[i], id[i], THIS_MODULE, 0, &card);
if (err < 0) {
snd_printk(KERN_ERR "pdacf: cannot create a card instance\n");
return err;
}
pdacf = snd_pdacf_create(card);
if (!pdacf) {
snd_card_free(card);
return -ENOMEM;
}
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pdacf, &ops);
if (err < 0) {
kfree(pdacf);
snd_card_free(card);
return err;
}
snd_card_set_dev(card, &handle_to_dev(link));
pdacf->index = i;
card_list[i] = card;
pdacf->p_dev = link;
link->priv = pdacf;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.NumPorts1 = 16;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT | IRQ_FORCED_PULSE;
// link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
link->irq.IRQInfo1 = 0 /* | IRQ_LEVEL_ID */;
link->irq.Handler = pdacf_interrupt;
link->irq.Instance = pdacf;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
return pdacf_config(link);
}
/**
* snd_pdacf_assign_resources - initialize the hardware and card instance.
* @port: i/o port for the card
* @irq: irq number for the card
*
* this function assigns the specified port and irq, boots the card,
* creates pcm and control instances, and initializes the rest of the hardware.
*
* returns 0 if successful, or a negative error code.
*/
static int snd_pdacf_assign_resources(struct snd_pdacf *pdacf, int port, int irq)
{
int err;
struct snd_card *card = pdacf->card;
snd_printdd(KERN_DEBUG "pdacf assign resources: port = 0x%x, irq = %d\n", port, irq);
pdacf->port = port;
pdacf->irq = irq;
pdacf->chip_status |= PDAUDIOCF_STAT_IS_CONFIGURED;
err = snd_pdacf_ak4117_create(pdacf);
if (err < 0)
return err;
strcpy(card->driver, "PDAudio-CF");
sprintf(card->shortname, "Core Sound %s", card->driver);
sprintf(card->longname, "%s at 0x%x, irq %i",
card->shortname, port, irq);
err = snd_pdacf_pcm_new(pdacf);
if (err < 0)
return err;
if ((err = snd_card_register(card)) < 0)
return err;
return 0;
}
/*
* snd_pdacf_detach - detach callback for cs
*/
static void snd_pdacf_detach(struct pcmcia_device *link)
{
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "pdacf_detach called\n");
if (chip->chip_status & PDAUDIOCF_STAT_IS_CONFIGURED)
snd_pdacf_powerdown(chip);
chip->chip_status |= PDAUDIOCF_STAT_IS_STALE; /* to be sure */
snd_card_disconnect(chip->card);
snd_card_free_when_closed(chip->card);
}
/*
* configuration callback
*/
static int pdacf_config(struct pcmcia_device *link)
{
struct snd_pdacf *pdacf = link->priv;
int ret;
snd_printdd(KERN_DEBUG "pdacf_config called\n");
link->conf.ConfigIndex = 0x5;
ret = pcmcia_request_io(link, &link->io);
if (ret)
goto failed;
ret = pcmcia_request_irq(link, &link->irq);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
goto failed;
link->dev_node = &pdacf->node;
return 0;
failed:
pcmcia_disable_device(link);
return -ENODEV;
}
#ifdef CONFIG_PM
static int pdacf_suspend(struct pcmcia_device *link)
{
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "SUSPEND\n");
if (chip) {
snd_printdd(KERN_DEBUG "snd_pdacf_suspend calling\n");
snd_pdacf_suspend(chip, PMSG_SUSPEND);
}
return 0;
}
static int pdacf_resume(struct pcmcia_device *link)
{
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "RESUME\n");
if (pcmcia_dev_present(link)) {
if (chip) {
snd_printdd(KERN_DEBUG "calling snd_pdacf_resume\n");
snd_pdacf_resume(chip);
}
}
snd_printdd(KERN_DEBUG "resume done!\n");
return 0;
}
#endif
/*
* Module entry points
*/
static struct pcmcia_device_id snd_pdacf_ids[] = {
/* this is too general PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), */
PCMCIA_DEVICE_PROD_ID12("Core Sound","PDAudio-CF",0x396d19d2,0x71717b49),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, snd_pdacf_ids);
static struct pcmcia_driver pdacf_cs_driver = {
.owner = THIS_MODULE,
.drv = {
.name = "snd-pdaudiocf",
},
.probe = snd_pdacf_probe,
.remove = snd_pdacf_detach,
.id_table = snd_pdacf_ids,
#ifdef CONFIG_PM
.suspend = pdacf_suspend,
.resume = pdacf_resume,
#endif
};
static int __init init_pdacf(void)
{
return pcmcia_register_driver(&pdacf_cs_driver);
}
static void __exit exit_pdacf(void)
{
pcmcia_unregister_driver(&pdacf_cs_driver);
}
module_init(init_pdacf);
module_exit(exit_pdacf);
| gpl-2.0 |
uclinux-cortexm/uclinux | arch/m68k/bvme6000/rtc.c | 762 | 4353 | /*
* Real Time Clock interface for Linux on the BVME6000
*
* Based on the PC driver by Paul Gortmaker.
*/
#define RTC_VERSION "1.00"
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
#include <linux/bcd.h>
#include <asm/bvme6000hw.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/setup.h>
/*
* We sponge a minor off of the misc major. No need to slurp
* up another valuable major dev number for this. If you add
* an ioctl, make sure you don't conflict with SPARC's RTC
* ioctls.
*/
static unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
static char rtc_status;
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
unsigned char msr;
unsigned long flags;
struct rtc_time wtime;
void __user *argp = (void __user *)arg;
switch (cmd) {
case RTC_RD_TIME: /* Read the time/date from RTC */
{
local_irq_save(flags);
/* Ensure clock and real-time-mode-register are accessible */
msr = rtc->msr & 0xc0;
rtc->msr = 0x40;
memset(&wtime, 0, sizeof(struct rtc_time));
do {
wtime.tm_sec = bcd2bin(rtc->bcd_sec);
wtime.tm_min = bcd2bin(rtc->bcd_min);
wtime.tm_hour = bcd2bin(rtc->bcd_hr);
wtime.tm_mday = bcd2bin(rtc->bcd_dom);
wtime.tm_mon = bcd2bin(rtc->bcd_mth)-1;
wtime.tm_year = bcd2bin(rtc->bcd_year);
if (wtime.tm_year < 70)
wtime.tm_year += 100;
wtime.tm_wday = bcd2bin(rtc->bcd_dow)-1;
} while (wtime.tm_sec != bcd2bin(rtc->bcd_sec));
rtc->msr = msr;
local_irq_restore(flags);
return copy_to_user(argp, &wtime, sizeof wtime) ?
-EFAULT : 0;
}
case RTC_SET_TIME: /* Set the RTC */
{
struct rtc_time rtc_tm;
unsigned char mon, day, hrs, min, sec, leap_yr;
unsigned int yrs;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
return -EFAULT;
yrs = rtc_tm.tm_year;
if (yrs < 1900)
yrs += 1900;
mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm.tm_mday;
hrs = rtc_tm.tm_hour;
min = rtc_tm.tm_min;
sec = rtc_tm.tm_sec;
leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
if ((mon > 12) || (mon < 1) || (day == 0))
return -EINVAL;
if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
return -EINVAL;
if ((hrs >= 24) || (min >= 60) || (sec >= 60))
return -EINVAL;
if (yrs >= 2070)
return -EINVAL;
local_irq_save(flags);
/* Ensure clock and real-time-mode-register are accessible */
msr = rtc->msr & 0xc0;
rtc->msr = 0x40;
rtc->t0cr_rtmr = yrs%4;
rtc->bcd_tenms = 0;
rtc->bcd_sec = bin2bcd(sec);
rtc->bcd_min = bin2bcd(min);
rtc->bcd_hr = bin2bcd(hrs);
rtc->bcd_dom = bin2bcd(day);
rtc->bcd_mth = bin2bcd(mon);
rtc->bcd_year = bin2bcd(yrs%100);
if (rtc_tm.tm_wday >= 0)
rtc->bcd_dow = bin2bcd(rtc_tm.tm_wday+1);
rtc->t0cr_rtmr = yrs%4 | 0x08;
rtc->msr = msr;
local_irq_restore(flags);
return 0;
}
default:
return -EINVAL;
}
}
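/*
 * Illustrative note (added for clarity, not in the original source): the
 * DP8570A keeps time in BCD, so the conversions above map e.g.
 * bin2bcd(59) == 0x59 and bcd2bin(0x23) == 23; tm_year follows the usual
 * struct rtc_time convention of years since 1900, which is why values
 * below 70 are bumped by 100 when reading.
 */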
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
* up things on a close.
*/
static int rtc_open(struct inode *inode, struct file *file)
{
lock_kernel();
if(rtc_status) {
unlock_kernel();
return -EBUSY;
}
rtc_status = 1;
unlock_kernel();
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
lock_kernel();
rtc_status = 0;
unlock_kernel();
return 0;
}
/*
* The various file operations we support.
*/
static const struct file_operations rtc_fops = {
.ioctl = rtc_ioctl,
.open = rtc_open,
.release = rtc_release,
};
static struct miscdevice rtc_dev = {
.minor = RTC_MINOR,
.name = "rtc",
.fops = &rtc_fops
};
static int __init rtc_DP8570A_init(void)
{
if (!MACH_IS_BVME6000)
return -ENODEV;
printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
return misc_register(&rtc_dev);
}
module_init(rtc_DP8570A_init);
| gpl-2.0 |
antmicro/linux-sunxi | drivers/leds/led-class-flash.c | 762 | 10034 | /*
* LED Flash class interface
*
* Copyright (C) 2015 Samsung Electronics Co., Ltd.
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/led-class-flash.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "leds.h"
#define has_flash_op(fled_cdev, op) \
(fled_cdev && fled_cdev->ops->op)
#define call_flash_op(fled_cdev, op, args...) \
((has_flash_op(fled_cdev, op)) ? \
(fled_cdev->ops->op(fled_cdev, args)) : \
-EINVAL)
static const char * const led_flash_fault_names[] = {
"led-over-voltage",
"flash-timeout-exceeded",
"controller-over-temperature",
"controller-short-circuit",
"led-power-supply-over-current",
"indicator-led-fault",
"led-under-voltage",
"controller-under-voltage",
"led-over-temperature",
};
static ssize_t flash_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long state;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
ret = led_set_flash_brightness(fled_cdev, state);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
/* no lock needed for this */
led_update_flash_brightness(fled_cdev);
return sprintf(buf, "%u\n", fled_cdev->brightness.val);
}
static DEVICE_ATTR_RW(flash_brightness);
static ssize_t max_flash_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->brightness.max);
}
static DEVICE_ATTR_RO(max_flash_brightness);
static ssize_t flash_strobe_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long state;
ssize_t ret = -EINVAL;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &state);
if (ret)
goto unlock;
if (state > 1) { /* state is unsigned; only 0 and 1 are valid */
ret = -EINVAL;
goto unlock;
}
ret = led_set_flash_strobe(fled_cdev, state);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_strobe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
bool state;
int ret;
/* no lock needed for this */
ret = led_get_flash_strobe(fled_cdev, &state);
if (ret < 0)
return ret;
return sprintf(buf, "%u\n", state);
}
static DEVICE_ATTR_RW(flash_strobe);
static ssize_t flash_timeout_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
unsigned long flash_timeout;
ssize_t ret;
mutex_lock(&led_cdev->led_access);
if (led_sysfs_is_disabled(led_cdev)) {
ret = -EBUSY;
goto unlock;
}
ret = kstrtoul(buf, 10, &flash_timeout);
if (ret)
goto unlock;
ret = led_set_flash_timeout(fled_cdev, flash_timeout);
if (ret < 0)
goto unlock;
ret = size;
unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
static ssize_t flash_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->timeout.val);
}
static DEVICE_ATTR_RW(flash_timeout);
static ssize_t max_flash_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
return sprintf(buf, "%u\n", fled_cdev->timeout.max);
}
static DEVICE_ATTR_RO(max_flash_timeout);
static ssize_t flash_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
u32 fault, mask = 0x1;
char *pbuf = buf;
int i, ret, buf_len;
ret = led_get_flash_fault(fled_cdev, &fault);
if (ret < 0)
return -EINVAL;
*buf = '\0';
for (i = 0; i < LED_NUM_FLASH_FAULTS; ++i) {
if (fault & mask) {
buf_len = sprintf(pbuf, "%s ",
led_flash_fault_names[i]);
pbuf += buf_len;
}
mask <<= 1;
}
pbuf += sprintf(pbuf, "\n");
return pbuf - buf;
}
static DEVICE_ATTR_RO(flash_fault);
static struct attribute *led_flash_strobe_attrs[] = {
&dev_attr_flash_strobe.attr,
NULL,
};
static struct attribute *led_flash_timeout_attrs[] = {
&dev_attr_flash_timeout.attr,
&dev_attr_max_flash_timeout.attr,
NULL,
};
static struct attribute *led_flash_brightness_attrs[] = {
&dev_attr_flash_brightness.attr,
&dev_attr_max_flash_brightness.attr,
NULL,
};
static struct attribute *led_flash_fault_attrs[] = {
&dev_attr_flash_fault.attr,
NULL,
};
static const struct attribute_group led_flash_strobe_group = {
.attrs = led_flash_strobe_attrs,
};
static const struct attribute_group led_flash_timeout_group = {
.attrs = led_flash_timeout_attrs,
};
static const struct attribute_group led_flash_brightness_group = {
.attrs = led_flash_brightness_attrs,
};
static const struct attribute_group led_flash_fault_group = {
.attrs = led_flash_fault_attrs,
};
static void led_flash_resume(struct led_classdev *led_cdev)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
call_flash_op(fled_cdev, flash_brightness_set,
fled_cdev->brightness.val);
call_flash_op(fled_cdev, timeout_set, fled_cdev->timeout.val);
}
static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
const struct led_flash_ops *ops = fled_cdev->ops;
const struct attribute_group **flash_groups = fled_cdev->sysfs_groups;
int num_sysfs_groups = 0;
flash_groups[num_sysfs_groups++] = &led_flash_strobe_group;
if (ops->flash_brightness_set)
flash_groups[num_sysfs_groups++] = &led_flash_brightness_group;
if (ops->timeout_set)
flash_groups[num_sysfs_groups++] = &led_flash_timeout_group;
if (ops->fault_get)
flash_groups[num_sysfs_groups++] = &led_flash_fault_group;
led_cdev->groups = flash_groups;
}
int led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
struct led_classdev *led_cdev;
const struct led_flash_ops *ops;
int ret;
if (!fled_cdev)
return -EINVAL;
led_cdev = &fled_cdev->led_cdev;
if (led_cdev->flags & LED_DEV_CAP_FLASH) {
if (!led_cdev->brightness_set_sync)
return -EINVAL;
ops = fled_cdev->ops;
if (!ops || !ops->strobe_set)
return -EINVAL;
led_cdev->flash_resume = led_flash_resume;
/* Select the sysfs attributes to be created for the device */
led_flash_init_sysfs_groups(fled_cdev);
}
/* Register led class device */
ret = led_classdev_register(parent, led_cdev);
if (ret < 0)
return ret;
/* Setting a torch brightness needs to have immediate effect */
led_cdev->flags &= ~SET_BRIGHTNESS_ASYNC;
led_cdev->flags |= SET_BRIGHTNESS_SYNC;
return 0;
}
EXPORT_SYMBOL_GPL(led_classdev_flash_register);
void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
{
if (!fled_cdev)
return;
led_classdev_unregister(&fled_cdev->led_cdev);
}
EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);
static void led_clamp_align(struct led_flash_setting *s)
{
u32 v, offset;
v = s->val + s->step / 2;
v = clamp(v, s->min, s->max);
offset = v - s->min;
offset = s->step * (offset / s->step);
s->val = s->min + offset;
}
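/*
 * Illustrative worked example (added for clarity; the numbers are made up):
 * with min = 2000, max = 10000, step = 2000 and a requested val = 4700,
 * led_clamp_align() computes v = 4700 + 1000 = 5700, clamps it, then
 * floor-aligns the offset (3700 -> 2000), giving val = 4000, i.e. the
 * request is rounded to the nearest point on the min + n * step grid.
 */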
int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
struct led_flash_setting *s = &fled_cdev->timeout;
s->val = timeout;
led_clamp_align(s);
if (!(led_cdev->flags & LED_SUSPENDED))
return call_flash_op(fled_cdev, timeout_set, s->val);
return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_timeout);
int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault)
{
return call_flash_op(fled_cdev, fault_get, fault);
}
EXPORT_SYMBOL_GPL(led_get_flash_fault);
int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
u32 brightness)
{
struct led_classdev *led_cdev = &fled_cdev->led_cdev;
struct led_flash_setting *s = &fled_cdev->brightness;
s->val = brightness;
led_clamp_align(s);
if (!(led_cdev->flags & LED_SUSPENDED))
return call_flash_op(fled_cdev, flash_brightness_set, s->val);
return 0;
}
EXPORT_SYMBOL_GPL(led_set_flash_brightness);
int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
{
struct led_flash_setting *s = &fled_cdev->brightness;
u32 brightness;
if (has_flash_op(fled_cdev, flash_brightness_get)) {
int ret = call_flash_op(fled_cdev, flash_brightness_get,
&brightness);
if (ret < 0)
return ret;
s->val = brightness;
}
return 0;
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
MadManA/MadKernel_cooper | drivers/video/xilinxfb.c | 762 | 14069 | /*
* Xilinx TFT frame buffer driver
*
* Author: MontaVista Software, Inc.
* source@mvista.com
*
* 2002-2007 (c) MontaVista Software, Inc.
* 2007 (c) Secret Lab Technologies, Ltd.
* 2009 (c) Xilinx Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/*
* This driver was based on au1100fb.c by MontaVista rewritten for 2.6
* by Embedded Alley Solutions <source@embeddedalley.com>, which in turn
* was based on skeletonfb.c, Skeleton for a frame buffer device by
* Geert Uytterhoeven.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/xilinxfb.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#define DRIVER_NAME "xilinxfb"
/*
* Xilinx calls it "PLB TFT LCD Controller" though it can also be used for
* the VGA port on the Xilinx ML40x board. This is a hardware display
* controller for a 640x480 resolution TFT or VGA screen.
*
* The interface to the framebuffer is nice and simple. There are two
* control registers. The first tells the LCD interface where in memory
* the frame buffer is (only the 11 most significant bits are used, so
* don't start thinking about scrolling). The second allows the LCD to
* be turned on or off as well as rotated 180 degrees.
*
* In case of direct PLB access the second control register will be at
* an offset of 4 as compared to the DCR access where the offset is 1
* i.e. REG_CTRL. So this is taken care in the function
* xilinx_fb_out_be32 where it left shifts the offset 2 times in case of
* direct PLB access.
*/
#define NUM_REGS 2
#define REG_FB_ADDR 0
#define REG_CTRL 1
#define REG_CTRL_ENABLE 0x0001
#define REG_CTRL_ROTATE 0x0002
/*
* The hardware only handles a single mode: 640x480 24 bit true
* color. Each pixel gets a word (32 bits) of memory. Within each word,
* the 8 most significant bits are ignored, the next 8 bits are the red
* level, the next 8 bits are the green level and the 8 least
* significant bits are the blue level. Each row of the LCD uses 1024
* words, but only the first 640 pixels are displayed with the other 384
* words being ignored. There are 480 rows.
*/
#define BYTES_PER_PIXEL 4
#define BITS_PER_PIXEL (BYTES_PER_PIXEL * 8)
#define RED_SHIFT 16
#define GREEN_SHIFT 8
#define BLUE_SHIFT 0
#define PALETTE_ENTRIES_NO 16 /* passed to fb_alloc_cmap() */
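/*
 * Illustrative note (added for clarity, not in the original source): with
 * the layout described above, a fully saturated red pixel is stored as
 * (0xff << RED_SHIFT) == 0x00ff0000, green as 0x0000ff00 and blue as
 * 0x000000ff; the top byte of each 32-bit framebuffer word is ignored.
 */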
/*
* Default xilinxfb configuration
*/
static struct xilinxfb_platform_data xilinx_fb_default_pdata = {
.xres = 640,
.yres = 480,
.xvirt = 1024,
.yvirt = 480,
};
/*
* Here are the default fb_fix_screeninfo and fb_var_screeninfo structures
*/
static struct fb_fix_screeninfo xilinx_fb_fix = {
.id = "Xilinx",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE
};
static struct fb_var_screeninfo xilinx_fb_var = {
.bits_per_pixel = BITS_PER_PIXEL,
.red = { RED_SHIFT, 8, 0 },
.green = { GREEN_SHIFT, 8, 0 },
.blue = { BLUE_SHIFT, 8, 0 },
.transp = { 0, 0, 0 },
.activate = FB_ACTIVATE_NOW
};
#define PLB_ACCESS_FLAG 0x1 /* 1 = PLB, 0 = DCR */
struct xilinxfb_drvdata {
struct fb_info info; /* FB driver info record */
phys_addr_t regs_phys; /* phys. address of the control
registers */
void __iomem *regs; /* virt. address of the control
registers */
dcr_host_t dcr_host;
unsigned int dcr_len;
void *fb_virt; /* virt. address of the frame buffer */
dma_addr_t fb_phys; /* phys. address of the frame buffer */
int fb_alloced; /* Flag, was the fb memory alloced? */
u8 flags; /* features of the driver */
u32 reg_ctrl_default;
u32 pseudo_palette[PALETTE_ENTRIES_NO];
/* Fake palette of 16 colors */
};
#define to_xilinxfb_drvdata(_info) \
container_of(_info, struct xilinxfb_drvdata, info)
/*
* The XPS TFT Controller can be accessed through PLB or DCR interface.
* To perform the read/write on the registers we need to check on
* which bus its connected and call the appropriate write API.
*/
static void xilinx_fb_out_be32(struct xilinxfb_drvdata *drvdata, u32 offset,
u32 val)
{
if (drvdata->flags & PLB_ACCESS_FLAG)
out_be32(drvdata->regs + (offset << 2), val);
else
dcr_write(drvdata->dcr_host, offset, val);
}
static int
xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *fbi)
{
u32 *palette = fbi->pseudo_palette;
if (regno >= PALETTE_ENTRIES_NO)
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale.
* grayscale = 0.30*R + 0.59*G + 0.11*B */
red = green = blue =
(red * 77 + green * 151 + blue * 28 + 127) >> 8;
}
/* fbi->fix.visual is always FB_VISUAL_TRUECOLOR */
/* We only handle 8 bits of each color. */
red >>= 8;
green >>= 8;
blue >>= 8;
palette[regno] = (red << RED_SHIFT) | (green << GREEN_SHIFT) |
(blue << BLUE_SHIFT);
return 0;
}
static int
xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct xilinxfb_drvdata *drvdata = to_xilinxfb_drvdata(fbi);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
/* turn on panel */
xilinx_fb_out_be32(drvdata, REG_CTRL, drvdata->reg_ctrl_default);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* turn off panel */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
default:
break;
}
return 0; /* success */
}
static struct fb_ops xilinxfb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = xilinx_fb_setcolreg,
.fb_blank = xilinx_fb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* ---------------------------------------------------------------------
* Bus independent setup/teardown
*/
static int xilinxfb_assign(struct device *dev,
struct xilinxfb_drvdata *drvdata,
unsigned long physaddr,
struct xilinxfb_platform_data *pdata)
{
int rc;
int fbsize = pdata->xvirt * pdata->yvirt * BYTES_PER_PIXEL;
if (drvdata->flags & PLB_ACCESS_FLAG) {
/*
* Map the control registers in if the controller
* is on direct PLB interface.
*/
if (!request_mem_region(physaddr, 8, DRIVER_NAME)) {
dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
physaddr);
rc = -ENODEV;
goto err_region;
}
drvdata->regs_phys = physaddr;
drvdata->regs = ioremap(physaddr, 8);
if (!drvdata->regs) {
dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
physaddr);
rc = -ENODEV;
goto err_map;
}
}
/* Allocate the framebuffer memory */
if (pdata->fb_phys) {
drvdata->fb_phys = pdata->fb_phys;
drvdata->fb_virt = ioremap(pdata->fb_phys, fbsize);
} else {
drvdata->fb_alloced = 1;
drvdata->fb_virt = dma_alloc_coherent(dev, PAGE_ALIGN(fbsize),
&drvdata->fb_phys, GFP_KERNEL);
}
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
rc = -ENOMEM;
if (drvdata->flags & PLB_ACCESS_FLAG)
goto err_fbmem;
else
goto err_region;
}
/* Clear (turn to black) the framebuffer */
memset_io((void __iomem *)drvdata->fb_virt, 0, fbsize);
/* Tell the hardware where the frame buffer is */
xilinx_fb_out_be32(drvdata, REG_FB_ADDR, drvdata->fb_phys);
/* Turn on the display */
drvdata->reg_ctrl_default = REG_CTRL_ENABLE;
if (pdata->rotate_screen)
drvdata->reg_ctrl_default |= REG_CTRL_ROTATE;
xilinx_fb_out_be32(drvdata, REG_CTRL,
drvdata->reg_ctrl_default);
/* Fill struct fb_info */
drvdata->info.device = dev;
drvdata->info.screen_base = (void __iomem *)drvdata->fb_virt;
drvdata->info.fbops = &xilinxfb_ops;
drvdata->info.fix = xilinx_fb_fix;
drvdata->info.fix.smem_start = drvdata->fb_phys;
drvdata->info.fix.smem_len = fbsize;
drvdata->info.fix.line_length = pdata->xvirt * BYTES_PER_PIXEL;
drvdata->info.pseudo_palette = drvdata->pseudo_palette;
drvdata->info.flags = FBINFO_DEFAULT;
drvdata->info.var = xilinx_fb_var;
drvdata->info.var.height = pdata->screen_height_mm;
drvdata->info.var.width = pdata->screen_width_mm;
drvdata->info.var.xres = pdata->xres;
drvdata->info.var.yres = pdata->yres;
drvdata->info.var.xres_virtual = pdata->xvirt;
drvdata->info.var.yres_virtual = pdata->yvirt;
/* Allocate a colour map */
rc = fb_alloc_cmap(&drvdata->info.cmap, PALETTE_ENTRIES_NO, 0);
if (rc) {
dev_err(dev, "Fail to allocate colormap (%d entries)\n",
PALETTE_ENTRIES_NO);
goto err_cmap;
}
/* Register new frame buffer */
rc = register_framebuffer(&drvdata->info);
if (rc) {
dev_err(dev, "Could not register frame buffer\n");
goto err_regfb;
}
if (drvdata->flags & PLB_ACCESS_FLAG) {
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr,
drvdata->regs);
}
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
(unsigned long long)drvdata->fb_phys, drvdata->fb_virt, fbsize);
return 0; /* success */
err_regfb:
fb_dealloc_cmap(&drvdata->info.cmap);
err_cmap:
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(fbsize), drvdata->fb_virt,
drvdata->fb_phys);
else
iounmap(drvdata->fb_virt);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
err_fbmem:
if (drvdata->flags & PLB_ACCESS_FLAG)
iounmap(drvdata->regs);
err_map:
if (drvdata->flags & PLB_ACCESS_FLAG)
release_mem_region(physaddr, 8);
err_region:
kfree(drvdata);
dev_set_drvdata(dev, NULL);
return rc;
}
static int xilinxfb_release(struct device *dev)
{
struct xilinxfb_drvdata *drvdata = dev_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
xilinx_fb_blank(VESA_POWERDOWN, &drvdata->info);
#endif
unregister_framebuffer(&drvdata->info);
fb_dealloc_cmap(&drvdata->info.cmap);
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(drvdata->info.fix.smem_len),
drvdata->fb_virt, drvdata->fb_phys);
else
iounmap(drvdata->fb_virt);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
/* Release the resources, as allocated based on interface */
if (drvdata->flags & PLB_ACCESS_FLAG) {
iounmap(drvdata->regs);
release_mem_region(drvdata->regs_phys, 8);
} else
dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
kfree(drvdata);
dev_set_drvdata(dev, NULL);
return 0;
}
/* ---------------------------------------------------------------------
* OF bus binding
*/
static int __devinit
xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
{
const u32 *prop;
u32 *p;
u32 tft_access;
struct xilinxfb_platform_data pdata;
struct resource res;
int size, rc, start;
struct xilinxfb_drvdata *drvdata;
/* Copy with the default pdata (not a ptr reference!) */
pdata = xilinx_fb_default_pdata;
dev_dbg(&op->dev, "xilinxfb_of_probe(%p, %p)\n", op, match);
/* Allocate the driver data region */
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
dev_err(&op->dev, "Couldn't allocate device private record\n");
return -ENOMEM;
}
/*
* Check whether the core is connected directly to the DCR or PLB
* interface and initialize tft_access accordingly.
*/
p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL);
tft_access = p ? *p : 0;
/*
* Fill the resource structure if it is a direct PLB interface,
* otherwise fill the dcr_host structure.
*/
if (tft_access) {
drvdata->flags |= PLB_ACCESS_FLAG;
rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
goto err;
}
} else {
res.start = 0;
start = dcr_resource_start(op->dev.of_node, 0);
drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
if (!DCR_MAP_OK(drvdata->dcr_host)) {
dev_err(&op->dev, "invalid DCR address\n");
goto err;
}
}
prop = of_get_property(op->dev.of_node, "phys-size", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.screen_width_mm = prop[0];
pdata.screen_height_mm = prop[1];
}
prop = of_get_property(op->dev.of_node, "resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xres = prop[0];
pdata.yres = prop[1];
}
prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xvirt = prop[0];
pdata.yvirt = prop[1];
}
if (of_find_property(op->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
dev_set_drvdata(&op->dev, drvdata);
return xilinxfb_assign(&op->dev, drvdata, res.start, &pdata);
err:
kfree(drvdata);
return -ENODEV;
}
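/*
* Binding note (illustrative only, not from the original source): the probe
* above reads the optional properties "xlnx,dcr-splb-slave-if", "phys-size",
* "resolution", "virtual-resolution" and "rotate-display". A node for a
* PLB-attached controller could therefore look roughly like this (node name,
* address and values are made up for the example):
*
* tft@86e00000 {
* compatible = "xlnx,xps-tft-1.00.a";
* reg = <0x86e00000 0x2000>;
* xlnx,dcr-splb-slave-if = <1>;
* resolution = <640 480>;
* virtual-resolution = <1024 480>;
* phys-size = <332 249>;
* };
*/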
static int __devexit xilinxfb_of_remove(struct of_device *op)
{
return xilinxfb_release(&op->dev);
}
/* Match table for of_platform binding */
static struct of_device_id xilinxfb_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-tft-1.00.a", },
{ .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
{ .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
static struct of_platform_driver xilinxfb_of_driver = {
.probe = xilinxfb_of_probe,
.remove = __devexit_p(xilinxfb_of_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = xilinxfb_of_match,
},
};
/* ---------------------------------------------------------------------
* Module setup and teardown
*/
static int __init
xilinxfb_init(void)
{
return of_register_platform_driver(&xilinxfb_of_driver);
}
static void __exit
xilinxfb_cleanup(void)
{
of_unregister_platform_driver(&xilinxfb_of_driver);
}
module_init(xilinxfb_init);
module_exit(xilinxfb_cleanup);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
openedev/streak_4.05_kernel | drivers/video/via/lcd.c | 762 | 38783 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include "global.h"
#include "lcdtbl.h"
#define viafb_compact_res(x, y) (((x)<<16)|(y))
static struct _lcd_scaling_factor lcd_scaling_factor = {
/* LCD Horizontal Scaling Factor Register */
{LCD_HOR_SCALING_FACTOR_REG_NUM,
{{CR9F, 0, 1}, {CR77, 0, 7}, {CR79, 4, 5} } },
/* LCD Vertical Scaling Factor Register */
{LCD_VER_SCALING_FACTOR_REG_NUM,
{{CR79, 3, 3}, {CR78, 0, 7}, {CR79, 6, 7} } }
};
static struct _lcd_scaling_factor lcd_scaling_factor_CLE = {
/* LCD Horizontal Scaling Factor Register */
{LCD_HOR_SCALING_FACTOR_REG_NUM_CLE, {{CR77, 0, 7}, {CR79, 4, 5} } },
/* LCD Vertical Scaling Factor Register */
{LCD_VER_SCALING_FACTOR_REG_NUM_CLE, {{CR78, 0, 7}, {CR79, 6, 7} } }
};
static int check_lvds_chip(int device_id_subaddr, int device_id);
static bool lvds_identify_integratedlvds(void);
static void fp_id_to_vindex(int panel_id);
static int lvds_register_read(int index);
static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
int panel_vres);
static void via_pitch_alignment_patch_lcd(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information
*plvds_chip_info);
static void lcd_patch_skew_dvp0(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
static void lcd_patch_skew_dvp1(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
static void lcd_patch_skew(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info);
static void integrated_lvds_disable(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
static void integrated_lvds_enable(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
static void lcd_powersequence_off(void);
static void lcd_powersequence_on(void);
static void fill_lcd_format(void);
static void check_diport_of_integrated_lvds(
struct lvds_chip_information *plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info);
static struct display_timing lcd_centering_timging(struct display_timing
mode_crt_reg,
struct display_timing panel_crt_reg);
static void viafb_load_scaling_factor_for_p4m900(int set_hres,
int set_vres, int panel_hres, int panel_vres);
static int check_lvds_chip(int device_id_subaddr, int device_id)
{
if (lvds_register_read(device_id_subaddr) == device_id)
return OK;
else
return FAIL;
}
void viafb_init_lcd_size(void)
{
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
DEBUG_MSG(KERN_INFO
"viaparinfo->lvds_setting_info->get_lcd_size_method %d\n",
viaparinfo->lvds_setting_info->get_lcd_size_method);
switch (viaparinfo->lvds_setting_info->get_lcd_size_method) {
case GET_LCD_SIZE_BY_SYSTEM_BIOS:
break;
case GET_LCD_SZIE_BY_HW_STRAPPING:
break;
case GET_LCD_SIZE_BY_VGA_BIOS:
DEBUG_MSG(KERN_INFO "Get LCD Size method by VGA BIOS !!\n");
fp_id_to_vindex(viafb_lcd_panel_id);
DEBUG_MSG(KERN_INFO "LCD Panel_ID = %d\n",
viaparinfo->lvds_setting_info->lcd_panel_id);
break;
case GET_LCD_SIZE_BY_USER_SETTING:
DEBUG_MSG(KERN_INFO "Get LCD Size method by user setting !!\n");
fp_id_to_vindex(viafb_lcd_panel_id);
DEBUG_MSG(KERN_INFO "LCD Panel_ID = %d\n",
viaparinfo->lvds_setting_info->lcd_panel_id);
break;
default:
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size fail\n");
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID1_800X600;
fp_id_to_vindex(LCD_PANEL_ID1_800X600);
}
viaparinfo->lvds_setting_info2->lcd_panel_id =
viaparinfo->lvds_setting_info->lcd_panel_id;
viaparinfo->lvds_setting_info2->lcd_panel_hres =
viaparinfo->lvds_setting_info->lcd_panel_hres;
viaparinfo->lvds_setting_info2->lcd_panel_vres =
viaparinfo->lvds_setting_info->lcd_panel_vres;
viaparinfo->lvds_setting_info2->device_lcd_dualedge =
viaparinfo->lvds_setting_info->device_lcd_dualedge;
viaparinfo->lvds_setting_info2->LCDDithering =
viaparinfo->lvds_setting_info->LCDDithering;
}
static bool lvds_identify_integratedlvds(void)
{
if (viafb_display_hardware_layout == HW_LAYOUT_LCD_EXTERNAL_LCD2) {
/* Two dual channel LCD (Internal LVDS + External LVDS): */
/* If we have an external LVDS, such as VT1636, we should
have its chip ID already. */
if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name =
INTEGRATED_LVDS;
DEBUG_MSG(KERN_INFO "Support two dual channel LVDS! "
"(Internal LVDS + External LVDS)\n");
} else {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
INTEGRATED_LVDS;
DEBUG_MSG(KERN_INFO "Not found external LVDS, "
"so can't support two dual channel LVDS!\n");
}
} else if (viafb_display_hardware_layout == HW_LAYOUT_LCD1_LCD2) {
/* Two single channel LCD (Internal LVDS + Internal LVDS): */
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
INTEGRATED_LVDS;
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name =
INTEGRATED_LVDS;
DEBUG_MSG(KERN_INFO "Support two single channel LVDS! "
"(Internal LVDS + Internal LVDS)\n");
} else if (viafb_display_hardware_layout != HW_LAYOUT_DVI_ONLY) {
/* If we have found external LVDS, just use it,
otherwise, we will use internal LVDS as default. */
if (!viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
INTEGRATED_LVDS;
DEBUG_MSG(KERN_INFO "Found Integrated LVDS!\n");
}
} else {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
NON_LVDS_TRANSMITTER;
DEBUG_MSG(KERN_INFO "Do not support LVDS!\n");
return false;
}
return true;
}
int viafb_lvds_trasmitter_identify(void)
{
if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
DEBUG_MSG(KERN_INFO
"Found VIA VT1636 LVDS on port i2c 0x31\n");
} else {
if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port =
VIA_PORT_2C;
DEBUG_MSG(KERN_INFO
"Found VIA VT1636 LVDS on port gpio 0x2c\n");
}
}
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700)
lvds_identify_integratedlvds();
if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name)
return true;
/* Check for VT1631: */
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = VT1631_LVDS;
viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
VT1631_LVDS_I2C_ADDR;
if (check_lvds_chip(VT1631_DEVICE_ID_REG, VT1631_DEVICE_ID) != FAIL) {
DEBUG_MSG(KERN_INFO "\n VT1631 LVDS ! \n");
DEBUG_MSG(KERN_INFO "\n %2d",
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name);
DEBUG_MSG(KERN_INFO "\n %2d",
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name);
return OK;
}
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
NON_LVDS_TRANSMITTER;
viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
VT1631_LVDS_I2C_ADDR;
return FAIL;
}
static void fp_id_to_vindex(int panel_id)
{
DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n");
if (panel_id > LCD_PANEL_ID_MAXIMUM)
viafb_lcd_panel_id = panel_id =
viafb_read_reg(VIACR, CR3F) & 0x0F;
switch (panel_id) {
case 0x0:
viaparinfo->lvds_setting_info->lcd_panel_hres = 640;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID0_640X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x1:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x2:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x3:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x4:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x5:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x6:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x8:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_IDA_800X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x9:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0xA:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xB:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xC:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xD:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xE:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xF:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x10:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1366;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID7_1366X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x11:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID8_1024X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x12:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x13:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 800;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID9_1280X800;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x14:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1360;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_IDB_1360X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x15:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x16:
viaparinfo->lvds_setting_info->lcd_panel_hres = 480;
viaparinfo->lvds_setting_info->lcd_panel_vres = 640;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_IDC_480X640;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x17:
/* OLPC XO-1.5 panel */
viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_IDD_1200X900;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
viaparinfo->lvds_setting_info->lcd_panel_id =
LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
}
}
static int lvds_register_read(int index)
{
u8 data;
viafb_i2c_readbyte(VIA_PORT_2C,
(u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr,
(u8) index, &data);
return data;
}
static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
int panel_vres)
{
int reg_value = 0;
int viafb_load_reg_num;
struct io_register *reg = NULL;
DEBUG_MSG(KERN_INFO "load_lcd_scaling()!!\n");
/* LCD Scaling Enable */
viafb_write_reg_mask(CR79, VIACR, 0x07, BIT0 + BIT1 + BIT2);
if (UNICHROME_P4M900 == viaparinfo->chip_info->gfx_chip_name) {
viafb_load_scaling_factor_for_p4m900(set_hres, set_vres,
panel_hres, panel_vres);
return;
}
/* Check if expansion for horizontal */
if (set_hres != panel_hres) {
/* Load Horizontal Scaling Factor */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
reg_value =
CLE266_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
viafb_load_reg_num =
lcd_scaling_factor_CLE.lcd_hor_scaling_factor.
reg_num;
reg = lcd_scaling_factor_CLE.lcd_hor_scaling_factor.reg;
viafb_load_reg(reg_value,
viafb_load_reg_num, reg, VIACR);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
reg_value =
K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
/* Horizontal scaling enabled */
viafb_write_reg_mask(CRA2, VIACR, 0xC0, BIT7 + BIT6);
viafb_load_reg_num =
lcd_scaling_factor.lcd_hor_scaling_factor.reg_num;
reg = lcd_scaling_factor.lcd_hor_scaling_factor.reg;
viafb_load_reg(reg_value,
viafb_load_reg_num, reg, VIACR);
break;
}
DEBUG_MSG(KERN_INFO "Horizontal Scaling value = %d", reg_value);
} else {
/* Horizontal scaling disabled */
viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT7);
}
/* Check if expansion for vertical */
if (set_vres != panel_vres) {
/* Load Vertical Scaling Factor */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
reg_value =
CLE266_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
viafb_load_reg_num =
lcd_scaling_factor_CLE.lcd_ver_scaling_factor.
reg_num;
reg = lcd_scaling_factor_CLE.lcd_ver_scaling_factor.reg;
viafb_load_reg(reg_value,
viafb_load_reg_num, reg, VIACR);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
reg_value =
K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
/* Vertical scaling enabled */
viafb_write_reg_mask(CRA2, VIACR, 0x08, BIT3);
viafb_load_reg_num =
lcd_scaling_factor.lcd_ver_scaling_factor.reg_num;
reg = lcd_scaling_factor.lcd_ver_scaling_factor.reg;
viafb_load_reg(reg_value,
viafb_load_reg_num, reg, VIACR);
break;
}
DEBUG_MSG(KERN_INFO "Vertical Scaling value = %d", reg_value);
} else {
/* Vertical scaling disabled */
viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT3);
}
}
static void via_pitch_alignment_patch_lcd(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information
*plvds_chip_info)
{
unsigned char cr13, cr35, cr65, cr66, cr67;
unsigned long dwScreenPitch = 0;
unsigned long dwPitch;
dwPitch = plvds_setting_info->h_active * (plvds_setting_info->bpp >> 3);
if (dwPitch & 0x1F) {
dwScreenPitch = ((dwPitch + 31) & ~31) >> 3;
if (plvds_setting_info->iga_path == IGA2) {
if (plvds_setting_info->bpp > 8) {
cr66 = (unsigned char)(dwScreenPitch & 0xFF);
viafb_write_reg(CR66, VIACR, cr66);
cr67 = viafb_read_reg(VIACR, CR67) & 0xFC;
cr67 |=
(unsigned
char)((dwScreenPitch & 0x300) >> 8);
viafb_write_reg(CR67, VIACR, cr67);
}
/* Fetch Count */
cr67 = viafb_read_reg(VIACR, CR67) & 0xF3;
cr67 |= (unsigned char)((dwScreenPitch & 0x600) >> 7);
viafb_write_reg(CR67, VIACR, cr67);
cr65 = (unsigned char)((dwScreenPitch >> 1) & 0xFF);
cr65 += 2;
viafb_write_reg(CR65, VIACR, cr65);
} else {
if (plvds_setting_info->bpp > 8) {
cr13 = (unsigned char)(dwScreenPitch & 0xFF);
viafb_write_reg(CR13, VIACR, cr13);
cr35 = viafb_read_reg(VIACR, CR35) & 0x1F;
cr35 |=
(unsigned
char)((dwScreenPitch & 0x700) >> 3);
viafb_write_reg(CR35, VIACR, cr35);
}
}
}
}
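/*
* Worked example for the patch above (illustrative, value chosen for the
* sake of the arithmetic): a 1366 pixel wide mode at 16 bpp gives
* dwPitch = 1366 * 2 = 2732 bytes, which is not 32-byte aligned
* (2732 & 0x1F = 12). It is rounded up to 2752 bytes and expressed in
* 8-byte units, dwScreenPitch = 2752 >> 3 = 344, which is then split across
* CR66/CR67 (IGA2) or CR13/CR35 (IGA1) as the adjusted screen pitch.
*/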
static void lcd_patch_skew_dvp0(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) {
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_P4M900:
viafb_vt1636_patch_skew_on_vt3364(plvds_setting_info,
plvds_chip_info);
break;
case UNICHROME_P4M890:
viafb_vt1636_patch_skew_on_vt3327(plvds_setting_info,
plvds_chip_info);
break;
}
}
}
static void lcd_patch_skew_dvp1(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) {
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CX700:
viafb_vt1636_patch_skew_on_vt3324(plvds_setting_info,
plvds_chip_info);
break;
}
}
}
static void lcd_patch_skew(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info)
{
DEBUG_MSG(KERN_INFO "lcd_patch_skew\n");
switch (plvds_chip_info->output_interface) {
case INTERFACE_DVP0:
lcd_patch_skew_dvp0(plvds_setting_info, plvds_chip_info);
break;
case INTERFACE_DVP1:
lcd_patch_skew_dvp1(plvds_setting_info, plvds_chip_info);
break;
case INTERFACE_DFP_LOW:
if (UNICHROME_P4M900 == viaparinfo->chip_info->gfx_chip_name) {
viafb_write_reg_mask(CR99, VIACR, 0x08,
BIT0 + BIT1 + BIT2 + BIT3);
}
break;
}
}
/* LCD Set Mode */
void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
int set_iga = plvds_setting_info->iga_path;
int mode_bpp = plvds_setting_info->bpp;
int set_hres = plvds_setting_info->h_active;
int set_vres = plvds_setting_info->v_active;
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
u32 pll_D_N;
struct display_timing mode_crt_reg, panel_crt_reg;
struct crt_mode_table *panel_crt_table = NULL;
struct VideoModeTable *vmode_tbl = viafb_get_mode(panel_hres,
panel_vres);
DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n");
/* Get mode table */
mode_crt_reg = mode_crt_table->crtc;
/* Get panel table Pointer */
panel_crt_table = vmode_tbl->crtc;
panel_crt_reg = panel_crt_table->crtc;
DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info);
plvds_setting_info->vclk = panel_crt_table->clk;
if (set_iga == IGA1) {
/* IGA1 doesn't have LCD scaling, so set it as centering. */
viafb_load_crtc_timing(lcd_centering_timging
(mode_crt_reg, panel_crt_reg), IGA1);
} else {
/* Expansion */
if ((plvds_setting_info->display_method ==
LCD_EXPANDSION) & ((set_hres != panel_hres)
|| (set_vres != panel_vres))) {
/* Expansion: IGA2 is loaded with the panel's timing */
viafb_load_crtc_timing(panel_crt_reg, IGA2);
DEBUG_MSG(KERN_INFO "viafb_load_crtc_timing!!\n");
load_lcd_scaling(set_hres, set_vres, panel_hres,
panel_vres);
DEBUG_MSG(KERN_INFO "load_lcd_scaling!!\n");
} else { /* Centering */
/* Centering: IGA2 is always loaded with the panel
and mode relative timing */
viafb_load_crtc_timing(lcd_centering_timging
(mode_crt_reg, panel_crt_reg), IGA2);
viafb_write_reg_mask(CR79, VIACR, 0x00,
BIT0 + BIT1 + BIT2);
/* LCD scaling disabled */
}
}
/* Fetch count for IGA2 only */
viafb_load_fetch_count_reg(set_hres, mode_bpp / 8, set_iga);
if ((viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266)
&& (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400))
viafb_load_FIFO_reg(set_iga, set_hres, set_vres);
fill_lcd_format();
pll_D_N = viafb_get_clk_value(panel_crt_table[0].clk);
DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
viafb_set_output_path(DEVICE_LCD, set_iga,
plvds_chip_info->output_interface);
lcd_patch_skew(plvds_setting_info, plvds_chip_info);
/* If K8M800 or K8M890, enable LCD Prefetch Mode. */
if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800)
|| (UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name))
viafb_write_reg_mask(CR6A, VIACR, 0x01, BIT0);
/* Patch for non 32bit alignment mode */
via_pitch_alignment_patch_lcd(plvds_setting_info, plvds_chip_info);
}
static void integrated_lvds_disable(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
bool turn_off_first_powersequence = false;
bool turn_off_second_powersequence = false;
if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface)
turn_off_first_powersequence = true;
if (INTERFACE_LVDS0 == plvds_chip_info->output_interface)
turn_off_first_powersequence = true;
if (INTERFACE_LVDS1 == plvds_chip_info->output_interface)
turn_off_second_powersequence = true;
if (turn_off_second_powersequence) {
/* Use second power sequence control: */
/* Turn off power sequence. */
viafb_write_reg_mask(CRD4, VIACR, 0, BIT1);
/* Turn off back light. */
viafb_write_reg_mask(CRD3, VIACR, 0xC0, BIT6 + BIT7);
}
if (turn_off_first_powersequence) {
/* Use first power sequence control: */
/* Turn off power sequence. */
viafb_write_reg_mask(CR6A, VIACR, 0, BIT3);
/* Turn off back light. */
viafb_write_reg_mask(CR91, VIACR, 0xC0, BIT6 + BIT7);
}
/* Turn DFP High/Low Pad off. */
viafb_write_reg_mask(SR2A, VIASR, 0, BIT0 + BIT1 + BIT2 + BIT3);
/* Power off LVDS channel. */
switch (plvds_chip_info->output_interface) {
case INTERFACE_LVDS0:
{
viafb_write_reg_mask(CRD2, VIACR, 0x80, BIT7);
break;
}
case INTERFACE_LVDS1:
{
viafb_write_reg_mask(CRD2, VIACR, 0x40, BIT6);
break;
}
case INTERFACE_LVDS0LVDS1:
{
viafb_write_reg_mask(CRD2, VIACR, 0xC0, BIT6 + BIT7);
break;
}
}
}
static void integrated_lvds_enable(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n",
plvds_chip_info->output_interface);
if (plvds_setting_info->lcd_mode == LCD_SPWG)
viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1);
else
viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1);
switch (plvds_chip_info->output_interface) {
case INTERFACE_LVDS0LVDS1:
case INTERFACE_LVDS0:
/* Use first power sequence control: */
/* Use hardware control power sequence. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT0);
/* Turn on back light. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7);
/* Turn on hardware power sequence. */
viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
break;
case INTERFACE_LVDS1:
/* Use second power sequence control: */
/* Use hardware control power sequence. */
viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
/* Turn on back light. */
viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
/* Turn on hardware power sequence. */
viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
break;
}
/* Turn DFP High/Low pad on. */
viafb_write_reg_mask(SR2A, VIASR, 0x0F, BIT0 + BIT1 + BIT2 + BIT3);
/* Power on LVDS channel. */
switch (plvds_chip_info->output_interface) {
case INTERFACE_LVDS0:
{
viafb_write_reg_mask(CRD2, VIACR, 0, BIT7);
break;
}
case INTERFACE_LVDS1:
{
viafb_write_reg_mask(CRD2, VIACR, 0, BIT6);
break;
}
case INTERFACE_LVDS0LVDS1:
{
viafb_write_reg_mask(CRD2, VIACR, 0, BIT6 + BIT7);
break;
}
}
}
void viafb_lcd_disable(void)
{
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
lcd_powersequence_off();
/* DI1 pad off */
viafb_write_reg_mask(SR1E, VIASR, 0x00, 0x30);
} else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) {
if (viafb_LCD2_ON
&& (INTEGRATED_LVDS ==
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name))
integrated_lvds_disable(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info2);
if (INTEGRATED_LVDS ==
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name)
integrated_lvds_disable(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
if (VT1636_LVDS == viaparinfo->chip_info->
lvds_chip_info.lvds_chip_name)
viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
} else if (VT1636_LVDS ==
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
} else {
/* DFP-HL pad off */
viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x0F);
/* Backlight off */
viafb_write_reg_mask(SR3D, VIASR, 0x00, 0x20);
/* 24 bit DI data path off */
viafb_write_reg_mask(CR91, VIACR, 0x80, 0x80);
/* Simultaneous mode disabled */
viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
}
/* Disable expansion bit */
viafb_write_reg_mask(CR79, VIACR, 0x00, 0x01);
/* CRT path set to IGA1 */
viafb_write_reg_mask(SR16, VIASR, 0x00, 0x40);
/* Simultaneous mode disabled */
viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
/* IGA2 path disabled */
viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
}
void viafb_lcd_enable(void)
{
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
/* DI1 pad on */
viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30);
lcd_powersequence_on();
} else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) {
if (viafb_LCD2_ON && (INTEGRATED_LVDS ==
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name))
integrated_lvds_enable(viaparinfo->lvds_setting_info2, \
&viaparinfo->chip_info->lvds_chip_info2);
if (INTEGRATED_LVDS ==
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name)
integrated_lvds_enable(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
if (VT1636_LVDS == viaparinfo->chip_info->
lvds_chip_info.lvds_chip_name)
viafb_enable_lvds_vt1636(viaparinfo->
lvds_setting_info, &viaparinfo->chip_info->
lvds_chip_info);
} else if (VT1636_LVDS ==
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
viafb_enable_lvds_vt1636(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
} else {
/* DFP-HL pad on */
viafb_write_reg_mask(SR2A, VIASR, 0x0F, 0x0F);
/* Backlight on */
viafb_write_reg_mask(SR3D, VIASR, 0x20, 0x20);
/* 24 bit DI data path on */
viafb_write_reg_mask(CR91, VIACR, 0x00, 0x80);
/* Set data source selection bit by iga path */
if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
/* DFP-H set to IGA1 */
viafb_write_reg_mask(CR97, VIACR, 0x00, 0x10);
/* DFP-L set to IGA1 */
viafb_write_reg_mask(CR99, VIACR, 0x00, 0x10);
} else {
/* DFP-H set to IGA2 */
viafb_write_reg_mask(CR97, VIACR, 0x10, 0x10);
/* DFP-L set to IGA2 */
viafb_write_reg_mask(CR99, VIACR, 0x10, 0x10);
}
/* LCD enabled */
viafb_write_reg_mask(CR6A, VIACR, 0x48, 0x48);
}
if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
/* CRT path set to IGA2 */
viafb_write_reg_mask(SR16, VIASR, 0x40, 0x40);
/* IGA2 path disabled */
viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
} else { /* IGA2 */
/* IGA2 path enabled */
viafb_write_reg_mask(CR6A, VIACR, 0x80, 0x80);
}
}
static void lcd_powersequence_off(void)
{
int i, mask, data;
/* Software control power sequence */
viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11);
for (i = 0; i < 3; i++) {
mask = PowerSequenceOff[0][i];
data = PowerSequenceOff[1][i] & mask;
viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask);
udelay(PowerSequenceOff[2][i]);
}
/* Disable LCD */
viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x08);
}
static void lcd_powersequence_on(void)
{
int i, mask, data;
/* Software control power sequence */
viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11);
/* Enable LCD */
viafb_write_reg_mask(CR6A, VIACR, 0x08, 0x08);
for (i = 0; i < 3; i++) {
mask = PowerSequenceOn[0][i];
data = PowerSequenceOn[1][i] & mask;
viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask);
udelay(PowerSequenceOn[2][i]);
}
udelay(1);
}
static void fill_lcd_format(void)
{
u8 bdithering = 0, bdual = 0;
if (viaparinfo->lvds_setting_info->device_lcd_dualedge)
bdual = BIT4;
if (viaparinfo->lvds_setting_info->LCDDithering)
bdithering = BIT0;
/* Dual & Dithering */
viafb_write_reg_mask(CR88, VIACR, (bdithering | bdual), BIT4 + BIT0);
}
static void check_diport_of_integrated_lvds(
struct lvds_chip_information *plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info)
{
/* Determine LCD DI Port by hardware layout. */
switch (viafb_display_hardware_layout) {
case HW_LAYOUT_LCD_ONLY:
{
if (plvds_setting_info->device_lcd_dualedge) {
plvds_chip_info->output_interface =
INTERFACE_LVDS0LVDS1;
} else {
plvds_chip_info->output_interface =
INTERFACE_LVDS0;
}
break;
}
case HW_LAYOUT_DVI_ONLY:
{
plvds_chip_info->output_interface = INTERFACE_NONE;
break;
}
case HW_LAYOUT_LCD1_LCD2:
case HW_LAYOUT_LCD_EXTERNAL_LCD2:
{
plvds_chip_info->output_interface =
INTERFACE_LVDS0LVDS1;
break;
}
case HW_LAYOUT_LCD_DVI:
{
plvds_chip_info->output_interface = INTERFACE_LVDS1;
break;
}
default:
{
plvds_chip_info->output_interface = INTERFACE_LVDS1;
break;
}
}
DEBUG_MSG(KERN_INFO
"Display Hardware Layout: 0x%x, LCD DI Port: 0x%x\n",
viafb_display_hardware_layout,
plvds_chip_info->output_interface);
}
void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info)
{
if (INTERFACE_NONE != plvds_chip_info->output_interface) {
/* Do nothing, LCD port is specified by module parameter */
return;
}
switch (plvds_chip_info->lvds_chip_name) {
case VT1636_LVDS:
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CX700:
plvds_chip_info->output_interface = INTERFACE_DVP1;
break;
case UNICHROME_CN700:
plvds_chip_info->output_interface = INTERFACE_DFP_LOW;
break;
default:
plvds_chip_info->output_interface = INTERFACE_DVP0;
break;
}
break;
case INTEGRATED_LVDS:
check_diport_of_integrated_lvds(plvds_chip_info,
plvds_setting_info);
break;
default:
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
case UNICHROME_P4M890:
plvds_chip_info->output_interface = INTERFACE_DFP_LOW;
break;
default:
plvds_chip_info->output_interface = INTERFACE_DFP;
break;
}
break;
}
}
static struct display_timing lcd_centering_timging(struct display_timing
mode_crt_reg,
struct display_timing panel_crt_reg)
{
struct display_timing crt_reg;
crt_reg.hor_total = panel_crt_reg.hor_total;
crt_reg.hor_addr = mode_crt_reg.hor_addr;
crt_reg.hor_blank_start =
(panel_crt_reg.hor_addr - mode_crt_reg.hor_addr) / 2 +
crt_reg.hor_addr;
crt_reg.hor_blank_end = panel_crt_reg.hor_blank_end;
crt_reg.hor_sync_start =
(panel_crt_reg.hor_sync_start -
panel_crt_reg.hor_blank_start) + crt_reg.hor_blank_start;
crt_reg.hor_sync_end = panel_crt_reg.hor_sync_end;
crt_reg.ver_total = panel_crt_reg.ver_total;
crt_reg.ver_addr = mode_crt_reg.ver_addr;
crt_reg.ver_blank_start =
(panel_crt_reg.ver_addr - mode_crt_reg.ver_addr) / 2 +
crt_reg.ver_addr;
crt_reg.ver_blank_end = panel_crt_reg.ver_blank_end;
crt_reg.ver_sync_start =
(panel_crt_reg.ver_sync_start -
panel_crt_reg.ver_blank_start) + crt_reg.ver_blank_start;
crt_reg.ver_sync_end = panel_crt_reg.ver_sync_end;
return crt_reg;
}
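/*
* Worked example for the centering computation above (illustrative): with a
* 1024 pixel wide panel and an 800 pixel wide mode, horizontal blanking
* starts at (1024 - 800) / 2 + 800 = 912, leaving a 112 pixel border on
* each side, and the sync start keeps its offset relative to the panel's
* own blank start. The vertical fields are derived the same way.
*/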
bool viafb_lcd_get_mobile_state(bool *mobile)
{
unsigned char *romptr, *tableptr;
u8 core_base;
unsigned char *biosptr;
/* Rom address */
u32 romaddr = 0x000C0000;
u16 start_pattern = 0;
biosptr = ioremap(romaddr, 0x10000);
memcpy(&start_pattern, biosptr, 2);
/* Compare pattern */
if (start_pattern == 0xAA55) {
/* Get the start of Table */
/* 0x1B means BIOS offset position */
romptr = biosptr + 0x1B;
tableptr = biosptr + *((u16 *) romptr);
/* Get the start of biosver structure */
/* 18 means BIOS version position. */
romptr = tableptr + 18;
romptr = biosptr + *((u16 *) romptr);
/* The offset should be 44, but the
actual image is three characters shorter. */
/* pRom += 44; */
romptr += 41;
core_base = *romptr++;
if (core_base & 0x8)
*mobile = false;
else
*mobile = true;
/* release memory */
iounmap(biosptr);
return true;
} else {
iounmap(biosptr);
return false;
}
}
static void viafb_load_scaling_factor_for_p4m900(int set_hres,
int set_vres, int panel_hres, int panel_vres)
{
int h_scaling_factor;
int v_scaling_factor;
u8 cra2 = 0;
u8 cr77 = 0;
u8 cr78 = 0;
u8 cr79 = 0;
u8 cr9f = 0;
/* Check if expansion for horizontal */
if (set_hres < panel_hres) {
/* Load Horizontal Scaling Factor */
/* For VIA_K8M800 or later chipsets. */
h_scaling_factor =
K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
/* HSCaleFactor[1:0] at CR9F[1:0] */
cr9f = h_scaling_factor & 0x0003;
/* HSCaleFactor[9:2] at CR77[7:0] */
cr77 = (h_scaling_factor & 0x03FC) >> 2;
/* HSCaleFactor[11:10] at CR79[5:4] */
cr79 = (h_scaling_factor & 0x0C00) >> 10;
cr79 <<= 4;
/* Horizontal scaling enabled */
cra2 = 0xC0;
DEBUG_MSG(KERN_INFO "Horizontal Scaling value = %d\n",
h_scaling_factor);
} else {
/* Horizontal scaling disabled */
cra2 = 0x00;
}
/* Check if expansion for vertical */
if (set_vres < panel_vres) {
/* Load Vertical Scaling Factor */
/* For VIA_K8M800 or later chipsets. */
v_scaling_factor =
K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
/* Vertical scaling enabled */
cra2 |= 0x08;
/* VSCaleFactor[0] at CR79[3] */
cr79 |= ((v_scaling_factor & 0x0001) << 3);
/* VSCaleFactor[8:1] at CR78[7:0] */
cr78 |= (v_scaling_factor & 0x01FE) >> 1;
/* VSCaleFactor[10:9] at CR79[7:6] */
cr79 |= ((v_scaling_factor & 0x0600) >> 9) << 6;
DEBUG_MSG(KERN_INFO "Vertical Scaling value = %d\n",
v_scaling_factor);
} else {
/* Vertical scaling disabled */
cra2 |= 0x00;
}
viafb_write_reg_mask(CRA2, VIACR, cra2, BIT3 + BIT6 + BIT7);
viafb_write_reg_mask(CR77, VIACR, cr77, 0xFF);
viafb_write_reg_mask(CR78, VIACR, cr78, 0xFF);
viafb_write_reg_mask(CR79, VIACR, cr79, 0xF8);
viafb_write_reg_mask(CR9F, VIACR, cr9f, BIT0 + BIT1);
}
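/*
* Packing note for the routine above (illustrative example, factor value
* chosen arbitrarily): a 12-bit horizontal factor such as 0x5A7 is split as
* CR9F[1:0] = 0x3 (bits 1:0), CR77[7:0] = 0x69 (bits 9:2) and
* CR79[5:4] = 0x1 (bits 11:10), while the 11-bit vertical factor is spread
* over CR79[3] (bit 0), CR78[7:0] (bits 8:1) and CR79[7:6] (bits 10:9),
* matching the masked register writes at the end of the function.
*/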
| gpl-2.0 |
paladin74/linux | drivers/media/usb/uvc/uvc_ctrl.c | 1274 | 57714 | /*
* uvc_ctrl.c -- USB Video Class driver - Controls
*
* Copyright (C) 2005-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
#include "uvcvideo.h"
#define UVC_CTRL_DATA_CURRENT 0
#define UVC_CTRL_DATA_BACKUP 1
#define UVC_CTRL_DATA_MIN 2
#define UVC_CTRL_DATA_MAX 3
#define UVC_CTRL_DATA_RES 4
#define UVC_CTRL_DATA_DEF 5
#define UVC_CTRL_DATA_LAST 6
/* ------------------------------------------------------------------------
* Controls
*/
static struct uvc_control_info uvc_ctrls[] = {
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BRIGHTNESS_CONTROL,
.index = 0,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_CONTRAST_CONTROL,
.index = 1,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_CONTROL,
.index = 2,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SATURATION_CONTROL,
.index = 3,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SHARPNESS_CONTROL,
.index = 4,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAMMA_CONTROL,
.index = 5,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.index = 6,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.index = 7,
.size = 4,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.index = 8,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAIN_CONTROL,
.index = 9,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.index = 10,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
.index = 11,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.index = 12,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.index = 13,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_DIGITAL_MULTIPLIER_CONTROL,
.index = 14,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL,
.index = 15,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL,
.index = 16,
.size = 1,
.flags = UVC_CTRL_FLAG_GET_CUR,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_ANALOG_LOCK_STATUS_CONTROL,
.index = 17,
.size = 1,
.flags = UVC_CTRL_FLAG_GET_CUR,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_SCANNING_MODE_CONTROL,
.index = 0,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_MODE_CONTROL,
.index = 1,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_GET_RES
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_PRIORITY_CONTROL,
.index = 2,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.index = 3,
.size = 4,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL,
.index = 4,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.index = 5,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_RELATIVE_CONTROL,
.index = 6,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN
| UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES
| UVC_CTRL_FLAG_GET_DEF
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.index = 7,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.index = 8,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.index = 9,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.index = 10,
.size = 3,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN
| UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES
| UVC_CTRL_FLAG_GET_DEF
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.index = 11,
.size = 8,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ROLL_ABSOLUTE_CONTROL,
.index = 13,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR
| UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ROLL_RELATIVE_CONTROL,
.index = 14,
.size = 2,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN
| UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES
| UVC_CTRL_FLAG_GET_DEF
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_AUTO_CONTROL,
.index = 17,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PRIVACY_CONTROL,
.index = 18,
.size = 1,
.flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_RESTORE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
};
static struct uvc_menu_info power_line_frequency_controls[] = {
{ 0, "Disabled" },
{ 1, "50 Hz" },
{ 2, "60 Hz" },
};
static struct uvc_menu_info exposure_auto_controls[] = {
{ 2, "Auto Mode" },
{ 1, "Manual Mode" },
{ 4, "Shutter Priority Mode" },
{ 8, "Aperture Priority Mode" },
};
static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
__u8 query, const __u8 *data)
{
__s8 zoom = (__s8)data[0];
switch (query) {
case UVC_GET_CUR:
return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]);
case UVC_GET_MIN:
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
return data[2];
}
}
static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
__s32 value, __u8 *data)
{
data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
data[2] = min((int)abs(value), 0xff);
}
static __s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
__u8 query, const __u8 *data)
{
unsigned int first = mapping->offset / 8;
__s8 rel = (__s8)data[first];
switch (query) {
case UVC_GET_CUR:
return (rel == 0) ? 0 : (rel > 0 ? data[first+1]
: -data[first+1]);
case UVC_GET_MIN:
return -data[first+1];
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
return data[first+1];
}
}
static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
__s32 value, __u8 *data)
{
unsigned int first = mapping->offset / 8;
data[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
data[first+1] = min_t(int, abs(value), 0xff);
}
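/*
* Encoding note for the relative-control helpers above (illustrative, not
* original source comments): these UVC controls carry a signed direction
* byte followed by an unsigned magnitude byte. Setting a V4L2 value of -3
* via uvc_ctrl_set_rel_speed() writes 0xff into the direction byte and 3
* into the speed byte, and the GET path folds them back into one signed
* value: {0xff, 3} -> -3 and {0x01, 3} -> 3 for UVC_GET_CUR, while
* UVC_GET_MIN returns the negated magnitude so the reported range is
* symmetric around zero.
*/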
static struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
.name = "Brightness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BRIGHTNESS_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_CONTRAST,
.name = "Contrast",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_CONTRAST_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_HUE,
.name = "Hue",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.master_id = V4L2_CID_HUE_AUTO,
.master_manual = 0,
},
{
.id = V4L2_CID_SATURATION,
.name = "Saturation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SATURATION_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_SHARPNESS,
.name = "Sharpness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SHARPNESS_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_GAMMA,
.name = "Gamma",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAMMA_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_BACKLIGHT_COMPENSATION,
.name = "Backlight Compensation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_GAIN,
.name = "Gain",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAIN_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.name = "Power Line Frequency",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.size = 2,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
.data_type = UVC_CTRL_DATA_TYPE_ENUM,
.menu_info = power_line_frequency_controls,
.menu_count = ARRAY_SIZE(power_line_frequency_controls),
},
{
.id = V4L2_CID_HUE_AUTO,
.name = "Hue, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
.slave_ids = { V4L2_CID_HUE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
.name = "Exposure, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_MODE_CONTROL,
.size = 4,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
.menu_info = exposure_auto_controls,
.menu_count = ARRAY_SIZE(exposure_auto_controls),
.slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
.name = "Exposure, Auto Priority",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_PRIORITY_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
{
.id = V4L2_CID_EXPOSURE_ABSOLUTE,
.name = "Exposure (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.size = 32,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
.master_id = V4L2_CID_EXPOSURE_AUTO,
.master_manual = V4L2_EXPOSURE_MANUAL,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.name = "White Balance Temperature, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
.slave_ids = { V4L2_CID_WHITE_BALANCE_TEMPERATURE, },
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
.name = "White Balance Temperature",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
.master_id = V4L2_CID_AUTO_WHITE_BALANCE,
.master_manual = 0,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.name = "White Balance Component, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
.slave_ids = { V4L2_CID_BLUE_BALANCE,
V4L2_CID_RED_BALANCE },
},
{
.id = V4L2_CID_BLUE_BALANCE,
.name = "White Balance Blue Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.master_id = V4L2_CID_AUTO_WHITE_BALANCE,
.master_manual = 0,
},
{
.id = V4L2_CID_RED_BALANCE,
.name = "White Balance Red Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
.offset = 16,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.master_id = V4L2_CID_AUTO_WHITE_BALANCE,
.master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
.name = "Focus (absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
.master_id = V4L2_CID_FOCUS_AUTO,
.master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_AUTO,
.name = "Focus, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
.slave_ids = { V4L2_CID_FOCUS_ABSOLUTE, },
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
.name = "Iris, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_IRIS_RELATIVE,
.name = "Iris, Relative",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.size = 8,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_ZOOM_ABSOLUTE,
.name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
},
{
.id = V4L2_CID_ZOOM_CONTINUOUS,
.name = "Zoom, Continuous",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.size = 0,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.get = uvc_ctrl_get_zoom,
.set = uvc_ctrl_set_zoom,
},
{
.id = V4L2_CID_PAN_ABSOLUTE,
.name = "Pan (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
.name = "Tilt (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
.offset = 32,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
.id = V4L2_CID_PAN_SPEED,
.name = "Pan (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.get = uvc_ctrl_get_rel_speed,
.set = uvc_ctrl_set_rel_speed,
},
{
.id = V4L2_CID_TILT_SPEED,
.name = "Tilt (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
.offset = 16,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
.get = uvc_ctrl_get_rel_speed,
.set = uvc_ctrl_set_rel_speed,
},
{
.id = V4L2_CID_PRIVACY,
.name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
};
/* ------------------------------------------------------------------------
* Utility functions
*/
static inline __u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id)
{
return ctrl->uvc_data + id * ctrl->info.size;
}
static inline int uvc_test_bit(const __u8 *data, int bit)
{
return (data[bit >> 3] >> (bit & 7)) & 1;
}
static inline void uvc_clear_bit(__u8 *data, int bit)
{
data[bit >> 3] &= ~(1 << (bit & 7));
}
/* Extract the bit string specified by mapping->offset and mapping->size
* from the little-endian data stored at 'data' and return the result as
* a signed 32-bit integer. Sign extension will be performed if the mapping
* references a signed data type.
*/
static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
__u8 query, const __u8 *data)
{
int bits = mapping->size;
int offset = mapping->offset;
__s32 value = 0;
__u8 mask;
data += offset / 8;
offset &= 7;
mask = ((1LL << bits) - 1) << offset;
for (; bits > 0; data++) {
__u8 byte = *data & mask;
value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
bits -= 8 - (offset > 0 ? offset : 0);
offset -= 8;
mask = (1 << bits) - 1;
}
/* Sign-extend the value if needed. */
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
value |= -(value & (1 << (mapping->size - 1)));
return value;
}
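/*
 * Illustrative trace (editorial addition, not part of the original driver):
 * extracting the second 16-bit component of a 4-byte little-endian control,
 * i.e. a mapping with size = 16 and offset = 16 such as the White Balance
 * Red Component above. The payload bytes are an assumed example; with
 * data = { 0x34, 0x12, 0x78, 0x56 }:
 *
 *   data += 16 / 8  -> data now points at 0x78, offset becomes 0
 *   1st pass: value  = 0x78, bits = 8, offset = -8, mask = 0xff
 *   2nd pass: value |= 0x56 << 8 = 0x5678, bits = 0 -> loop ends
 *
 * The function returns 0x5678; for a signed data type, bit 15 would then be
 * sign-extended into the upper half of the 32-bit result.
 */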
/* Set the bit string specified by mapping->offset and mapping->size
* in the little-endian data stored at 'data' to the value 'value'.
*/
static void uvc_set_le_value(struct uvc_control_mapping *mapping,
__s32 value, __u8 *data)
{
int bits = mapping->size;
int offset = mapping->offset;
__u8 mask;
/* According to the v4l2 spec, writing any value to a button control
* should result in the action belonging to the button control being
* triggered. UVC devices, however, expect a 1 to be written, so the
* value is overridden.
*/
if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON)
value = -1;
data += offset / 8;
offset &= 7;
for (; bits > 0; data++) {
mask = ((1LL << bits) - 1) << offset;
*data = (*data & ~mask) | ((value << offset) & mask);
value >>= offset ? offset : 8;
bits -= 8 - offset;
offset = 0;
}
}
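/*
 * Illustrative trace (editorial addition, not part of the original driver):
 * writing the value 0x5678 back through the same size = 16, offset = 16
 * mapping. The first two bytes of the control payload are skipped and left
 * untouched; the loop then stores the low byte first:
 *
 *   data += 16 / 8  -> skip payload bytes 0 and 1, offset becomes 0
 *   1st pass: byte 2 of the payload = 0x78, value >>= 8 -> 0x56
 *   2nd pass: byte 3 of the payload = 0x56, value >>= 8 -> 0x00
 *
 * leaving the little-endian payload { xx, xx, 0x78, 0x56 } ready for SET_CUR.
 */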
/* ------------------------------------------------------------------------
* Terminal and unit management
*/
static const __u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
static const __u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
static const __u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static int uvc_entity_match_guid(const struct uvc_entity *entity,
const __u8 guid[16])
{
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_ITT_CAMERA:
return memcmp(uvc_camera_guid, guid, 16) == 0;
case UVC_ITT_MEDIA_TRANSPORT_INPUT:
return memcmp(uvc_media_transport_input_guid, guid, 16) == 0;
case UVC_VC_PROCESSING_UNIT:
return memcmp(uvc_processing_guid, guid, 16) == 0;
case UVC_VC_EXTENSION_UNIT:
return memcmp(entity->extension.guidExtensionCode,
guid, 16) == 0;
default:
return 0;
}
}
/* ------------------------------------------------------------------------
* UVC Controls
*/
static void __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id,
struct uvc_control_mapping **mapping, struct uvc_control **control,
int next)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *map;
unsigned int i;
if (entity == NULL)
return;
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (!ctrl->initialized)
continue;
list_for_each_entry(map, &ctrl->info.mappings, list) {
if ((map->id == v4l2_id) && !next) {
*control = ctrl;
*mapping = map;
return;
}
if ((*mapping == NULL || (*mapping)->id > map->id) &&
(map->id > v4l2_id) && next) {
*control = ctrl;
*mapping = map;
}
}
}
}
static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
__u32 v4l2_id, struct uvc_control_mapping **mapping)
{
struct uvc_control *ctrl = NULL;
struct uvc_entity *entity;
int next = v4l2_id & V4L2_CTRL_FLAG_NEXT_CTRL;
*mapping = NULL;
/* Mask the query flags. */
v4l2_id &= V4L2_CTRL_ID_MASK;
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
__uvc_find_control(entity, v4l2_id, mapping, &ctrl, next);
if (ctrl && !next)
return ctrl;
}
if (ctrl == NULL && !next)
uvc_trace(UVC_TRACE_CONTROL, "Control 0x%08x not found.\n",
v4l2_id);
return ctrl;
}
static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
int ret;
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_DEF, ctrl->entity->id,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF),
ctrl->info.size);
if (ret < 0)
return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MIN, ctrl->entity->id,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN),
ctrl->info.size);
if (ret < 0)
return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MAX, ctrl->entity->id,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX),
ctrl->info.size);
if (ret < 0)
return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_RES, ctrl->entity->id,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES),
ctrl->info.size);
if (ret < 0) {
if (UVC_ENTITY_TYPE(ctrl->entity) !=
UVC_VC_EXTENSION_UNIT)
return ret;
/* GET_RES is mandatory for XU controls, but some
* cameras still choke on it. Ignore errors and set the
* resolution value to zero.
*/
uvc_warn_once(chain->dev, UVC_WARN_XU_GET_RES,
"UVC non compliance - GET_RES failed on "
"an XU control. Enabling workaround.\n");
memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), 0,
ctrl->info.size);
}
}
ctrl->cached = 1;
return 0;
}
static int __uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
s32 *value)
{
struct uvc_menu_info *menu;
unsigned int i;
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
return -EACCES;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
if (ret < 0)
return ret;
ctrl->loaded = 1;
}
*value = mapping->get(mapping, UVC_GET_CUR,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
menu = mapping->menu_info;
for (i = 0; i < mapping->menu_count; ++i, ++menu) {
if (menu->value == *value) {
*value = i;
break;
}
}
}
return 0;
}
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
struct v4l2_queryctrl *v4l2_ctrl)
{
struct uvc_control_mapping *master_map = NULL;
struct uvc_control *master_ctrl = NULL;
struct uvc_menu_info *menu;
unsigned int i;
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (mapping->master_id)
__uvc_find_control(ctrl->entity, mapping->master_id,
&master_map, &master_ctrl, 0);
if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
s32 val;
int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
if (ret < 0)
return ret;
if (val != mapping->master_manual)
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
}
if (!ctrl->cached) {
int ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
}
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_MENU:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = mapping->menu_count - 1;
v4l2_ctrl->step = 1;
menu = mapping->menu_info;
for (i = 0; i < mapping->menu_count; ++i, ++menu) {
if (menu->value == v4l2_ctrl->default_value) {
v4l2_ctrl->default_value = i;
break;
}
}
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 1;
v4l2_ctrl->step = 1;
return 0;
case V4L2_CTRL_TYPE_BUTTON:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 0;
v4l2_ctrl->step = 0;
return 0;
default:
break;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN)
v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
return 0;
}
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct v4l2_queryctrl *v4l2_ctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
int ret;
ret = mutex_lock_interruptible(&chain->ctrl_mutex);
if (ret < 0)
return -ERESTARTSYS;
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
goto done;
}
ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
/*
* Mapping V4L2 controls to UVC controls can be straightforward if done well.
* Most of the UVC controls exist in V4L2, and can be mapped directly. Some
* must be grouped (for instance the Red Balance, Blue Balance and Do White
* Balance V4L2 controls use the White Balance Component UVC control) or
* otherwise translated. The approach we take here is to use a translation
* table for the controls that can be mapped directly, and handle the others
* manually.
*/
int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
struct v4l2_querymenu *query_menu)
{
struct uvc_menu_info *menu_info;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
u32 index = query_menu->index;
u32 id = query_menu->id;
int ret;
memset(query_menu, 0, sizeof(*query_menu));
query_menu->id = id;
query_menu->index = index;
ret = mutex_lock_interruptible(&chain->ctrl_mutex);
if (ret < 0)
return -ERESTARTSYS;
ctrl = uvc_find_control(chain, query_menu->id, &mapping);
if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
ret = -EINVAL;
goto done;
}
if (query_menu->index >= mapping->menu_count) {
ret = -EINVAL;
goto done;
}
menu_info = &mapping->menu_info[query_menu->index];
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
(ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
s32 bitmap;
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
goto done;
}
bitmap = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (!(bitmap & menu_info->value)) {
ret = -EINVAL;
goto done;
}
}
strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
/* --------------------------------------------------------------------------
* Ctrl event handling
*/
static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
struct v4l2_event *ev,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
s32 value, u32 changes)
{
struct v4l2_queryctrl v4l2_ctrl;
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
memset(ev->reserved, 0, sizeof(ev->reserved));
ev->type = V4L2_EVENT_CTRL;
ev->id = v4l2_ctrl.id;
ev->u.ctrl.value = value;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = v4l2_ctrl.type;
ev->u.ctrl.flags = v4l2_ctrl.flags;
ev->u.ctrl.minimum = v4l2_ctrl.minimum;
ev->u.ctrl.maximum = v4l2_ctrl.maximum;
ev->u.ctrl.step = v4l2_ctrl.step;
ev->u.ctrl.default_value = v4l2_ctrl.default_value;
}
static void uvc_ctrl_send_event(struct uvc_fh *handle,
struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
s32 value, u32 changes)
{
struct v4l2_subscribed_event *sev;
struct v4l2_event ev;
if (list_empty(&mapping->ev_subs))
return;
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes);
list_for_each_entry(sev, &mapping->ev_subs, node) {
if (sev->fh && (sev->fh != &handle->vfh ||
(sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) ||
(changes & V4L2_EVENT_CTRL_CH_FLAGS)))
v4l2_event_queue_fh(sev->fh, &ev);
}
}
static void uvc_ctrl_send_slave_event(struct uvc_fh *handle,
struct uvc_control *master, u32 slave_id,
const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
{
struct uvc_control_mapping *mapping = NULL;
struct uvc_control *ctrl = NULL;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
unsigned int i;
s32 val = 0;
/*
* We can skip sending an event for the slave if the slave
* is being modified in the same transaction.
*/
for (i = 0; i < xctrls_count; i++) {
if (xctrls[i].id == slave_id)
return;
}
__uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
if (ctrl == NULL)
return;
if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_send_event(handle, ctrl, mapping, val, changes);
}
static void uvc_ctrl_send_events(struct uvc_fh *handle,
const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
{
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
unsigned int i;
unsigned int j;
for (i = 0; i < xctrls_count; ++i) {
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
if (!mapping->slave_ids[j])
break;
uvc_ctrl_send_slave_event(handle, ctrl,
mapping->slave_ids[j],
xctrls, xctrls_count);
}
/*
* If the master is being modified in the same transaction
* flags may change too.
*/
if (mapping->master_id) {
for (j = 0; j < xctrls_count; j++) {
if (xctrls[j].id == mapping->master_id) {
changes |= V4L2_EVENT_CTRL_CH_FLAGS;
break;
}
}
}
uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value,
changes);
}
}
static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
int ret;
ret = mutex_lock_interruptible(&handle->chain->ctrl_mutex);
if (ret < 0)
return -ERESTARTSYS;
ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
goto done;
}
list_add_tail(&sev->node, &mapping->ev_subs);
if (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL) {
struct v4l2_event ev;
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
changes);
/* Mark the queue as active, allowing this initial
event to be accepted. */
sev->elems = elems;
v4l2_event_queue_fh(sev->fh, &ev);
}
done:
mutex_unlock(&handle->chain->ctrl_mutex);
return ret;
}
static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
mutex_lock(&handle->chain->ctrl_mutex);
list_del(&sev->node);
mutex_unlock(&handle->chain->ctrl_mutex);
}
const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops = {
.add = uvc_ctrl_add_event,
.del = uvc_ctrl_del_event,
.replace = v4l2_ctrl_replace,
.merge = v4l2_ctrl_merge,
};
/* --------------------------------------------------------------------------
* Control transactions
*
* To make extended set operations as atomic as the hardware allows, controls
* are handled using begin/commit/rollback operations.
*
* At the beginning of a set request, uvc_ctrl_begin should be called to
* initialize the request. This function acquires the control lock.
*
* When setting a control, the new value is stored in the control data field
* at position UVC_CTRL_DATA_CURRENT. The control is then marked as dirty for
* later processing. If the UVC and V4L2 control sizes differ, the current
* value is loaded from the hardware before storing the new value in the data
* field.
*
* After processing all controls in the transaction, uvc_ctrl_commit or
* uvc_ctrl_rollback must be called to apply the pending changes to the
* hardware or revert them. When applying changes, all controls marked as
* dirty will be modified in the UVC device, and the dirty flag will be
* cleared. When reverting controls, the control data field
* UVC_CTRL_DATA_CURRENT is reverted to its previous value
* (UVC_CTRL_DATA_BACKUP) for all dirty controls. Both functions release the
* control lock.
*/
int uvc_ctrl_begin(struct uvc_video_chain *chain)
{
return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0;
}
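/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * driver and never compiled): the begin/set/commit calling sequence described
 * above, reduced to a single control. The function name is hypothetical.
 * __uvc_ctrl_commit() with rollback = 1 restores the UVC_CTRL_DATA_BACKUP
 * copy instead of issuing SET_CUR; both paths drop the control lock taken by
 * uvc_ctrl_begin().
 */
#if 0	/* example only */
static int example_set_single_ctrl(struct uvc_fh *handle,
	struct v4l2_ext_control *xctrl)
{
	int ret = uvc_ctrl_begin(handle->chain);

	if (ret < 0)
		return ret;
	ret = uvc_ctrl_set(handle->chain, xctrl);
	if (ret < 0) {
		__uvc_ctrl_commit(handle, 1, NULL, 0);	/* rollback */
		return ret;
	}
	return __uvc_ctrl_commit(handle, 0, xctrl, 1);	/* apply + send events */
}
#endif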
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
struct uvc_entity *entity, int rollback)
{
struct uvc_control *ctrl;
unsigned int i;
int ret;
if (entity == NULL)
return 0;
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (!ctrl->initialized)
continue;
/* Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
* uvc_ctrl_get from using the cached value, and for write-only
* controls to prevent uvc_ctrl_set from setting bits not
* explicitly set by the user.
*/
if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE ||
!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
ctrl->loaded = 0;
if (!ctrl->dirty)
continue;
if (!rollback)
ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id,
dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
else
ret = 0;
if (rollback || ret < 0)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
ctrl->info.size);
ctrl->dirty = 0;
if (ret < 0)
return ret;
}
return 0;
}
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
const struct v4l2_ext_control *xctrls,
unsigned int xctrls_count)
{
struct uvc_video_chain *chain = handle->chain;
struct uvc_entity *entity;
int ret = 0;
/* Commit or roll back the pending changes for every entity in the chain. */
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
if (ret < 0)
goto done;
}
if (!rollback)
uvc_ctrl_send_events(handle, xctrls, xctrls_count);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
int uvc_ctrl_get(struct uvc_video_chain *chain,
struct v4l2_ext_control *xctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
int uvc_ctrl_set(struct uvc_video_chain *chain,
struct v4l2_ext_control *xctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
s32 value;
u32 step;
s32 min;
s32 max;
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
return -EACCES;
/* Clamp out of range values. */
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_INTEGER:
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
min = mapping->get(mapping, UVC_GET_MIN,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
max = mapping->get(mapping, UVC_GET_MAX,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (step == 0)
step = 1;
xctrl->value = min + ((u32)(xctrl->value - min) + step / 2)
/ step * step;
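/*
 * Illustrative arithmetic (editorial note): with an assumed min = 0 and
 * step = 5, a requested value of 13 becomes 0 + ((13 + 2) / 5) * 5 = 15,
 * i.e. the value is rounded to the nearest multiple of the step before
 * being clamped to [min, max] below.
 */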
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
xctrl->value = clamp(xctrl->value, min, max);
else
xctrl->value = clamp_t(u32, xctrl->value, min, max);
value = xctrl->value;
break;
case V4L2_CTRL_TYPE_BOOLEAN:
xctrl->value = clamp(xctrl->value, 0, 1);
value = xctrl->value;
break;
case V4L2_CTRL_TYPE_MENU:
if (xctrl->value < 0 || xctrl->value >= mapping->menu_count)
return -ERANGE;
value = mapping->menu_info[xctrl->value].value;
/* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
(ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (!(step & value))
return -EINVAL;
}
break;
default:
value = xctrl->value;
break;
}
/* If the mapping doesn't span the whole UVC control, the current value
* needs to be loaded from the device to perform the read-modify-write
* operation.
*/
if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) {
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) {
memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
0, ctrl->info.size);
} else {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
ctrl->entity->id, chain->dev->intfnum,
ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
if (ret < 0)
return ret;
}
ctrl->loaded = 1;
}
/* Backup the current value in case we need to rollback later. */
if (!ctrl->dirty) {
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
}
mapping->set(mapping, value,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
ctrl->dirty = 1;
ctrl->modified = 1;
return 0;
}
/* --------------------------------------------------------------------------
* Dynamic controls
*/
static void uvc_ctrl_fixup_xu_info(struct uvc_device *dev,
const struct uvc_control *ctrl, struct uvc_control_info *info)
{
struct uvc_ctrl_fixup {
struct usb_device_id id;
u8 entity;
u8 selector;
u8 flags;
};
static const struct uvc_ctrl_fixup fixups[] = {
{ { USB_DEVICE(0x046d, 0x08c2) }, 9, 1,
UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX |
UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR |
UVC_CTRL_FLAG_AUTO_UPDATE },
{ { USB_DEVICE(0x046d, 0x08cc) }, 9, 1,
UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX |
UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR |
UVC_CTRL_FLAG_AUTO_UPDATE },
{ { USB_DEVICE(0x046d, 0x0994) }, 9, 1,
UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX |
UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR |
UVC_CTRL_FLAG_AUTO_UPDATE },
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fixups); ++i) {
if (!usb_match_one_id(dev->intf, &fixups[i].id))
continue;
if (fixups[i].entity == ctrl->entity->id &&
fixups[i].selector == info->selector) {
info->flags = fixups[i].flags;
return;
}
}
}
/*
* Query control information (size and flags) for XU controls.
*/
static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
const struct uvc_control *ctrl, struct uvc_control_info *info)
{
u8 *data;
int ret;
data = kmalloc(2, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
memcpy(info->entity, ctrl->entity->extension.guidExtensionCode,
sizeof(info->entity));
info->index = ctrl->index;
info->selector = ctrl->index + 1;
/* Query and verify the control length (GET_LEN) */
ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum,
info->selector, data, 2);
if (ret < 0) {
uvc_trace(UVC_TRACE_CONTROL,
"GET_LEN failed on control %pUl/%u (%d).\n",
info->entity, info->selector, ret);
goto done;
}
info->size = le16_to_cpup((__le16 *)data);
/* Query the control information (GET_INFO) */
ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
info->selector, data, 1);
if (ret < 0) {
uvc_trace(UVC_TRACE_CONTROL,
"GET_INFO failed on control %pUl/%u (%d).\n",
info->entity, info->selector, ret);
goto done;
}
info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
| UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF
| (data[0] & UVC_CONTROL_CAP_GET ?
UVC_CTRL_FLAG_GET_CUR : 0)
| (data[0] & UVC_CONTROL_CAP_SET ?
UVC_CTRL_FLAG_SET_CUR : 0)
| (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
UVC_CTRL_FLAG_AUTO_UPDATE : 0);
uvc_ctrl_fixup_xu_info(dev, ctrl, info);
uvc_trace(UVC_TRACE_CONTROL, "XU control %pUl/%u queried: len %u, "
"flags { get %u set %u auto %u }.\n",
info->entity, info->selector, info->size,
(info->flags & UVC_CTRL_FLAG_GET_CUR) ? 1 : 0,
(info->flags & UVC_CTRL_FLAG_SET_CUR) ? 1 : 0,
(info->flags & UVC_CTRL_FLAG_AUTO_UPDATE) ? 1 : 0);
done:
kfree(data);
return ret;
}
static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
const struct uvc_control_info *info);
static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
struct uvc_control *ctrl)
{
struct uvc_control_info info;
int ret;
if (ctrl->initialized)
return 0;
ret = uvc_ctrl_fill_xu_info(dev, ctrl, &info);
if (ret < 0)
return ret;
ret = uvc_ctrl_add_info(dev, ctrl, &info);
if (ret < 0)
uvc_trace(UVC_TRACE_CONTROL, "Failed to initialize control "
"%pUl/%u on device %s entity %u\n", info.entity,
info.selector, dev->udev->devpath, ctrl->entity->id);
return ret;
}
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry)
{
struct uvc_entity *entity;
struct uvc_control *ctrl;
unsigned int i, found = 0;
__u32 reqflags;
__u16 size;
__u8 *data = NULL;
int ret;
/* Find the extension unit. */
list_for_each_entry(entity, &chain->entities, chain) {
if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
entity->id == xqry->unit)
break;
}
if (entity->id != xqry->unit) {
uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
xqry->unit);
return -ENOENT;
}
/* Find the control and perform delayed initialization if needed. */
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (ctrl->index == xqry->selector - 1) {
found = 1;
break;
}
}
if (!found) {
uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u not found.\n",
entity->extension.guidExtensionCode, xqry->selector);
return -ENOENT;
}
if (mutex_lock_interruptible(&chain->ctrl_mutex))
return -ERESTARTSYS;
ret = uvc_ctrl_init_xu_ctrl(chain->dev, ctrl);
if (ret < 0) {
ret = -ENOENT;
goto done;
}
/* Validate the required buffer size and flags for the request */
reqflags = 0;
size = ctrl->info.size;
switch (xqry->query) {
case UVC_GET_CUR:
reqflags = UVC_CTRL_FLAG_GET_CUR;
break;
case UVC_GET_MIN:
reqflags = UVC_CTRL_FLAG_GET_MIN;
break;
case UVC_GET_MAX:
reqflags = UVC_CTRL_FLAG_GET_MAX;
break;
case UVC_GET_DEF:
reqflags = UVC_CTRL_FLAG_GET_DEF;
break;
case UVC_GET_RES:
reqflags = UVC_CTRL_FLAG_GET_RES;
break;
case UVC_SET_CUR:
reqflags = UVC_CTRL_FLAG_SET_CUR;
break;
case UVC_GET_LEN:
size = 2;
break;
case UVC_GET_INFO:
size = 1;
break;
default:
ret = -EINVAL;
goto done;
}
if (size != xqry->size) {
ret = -ENOBUFS;
goto done;
}
if (reqflags && !(ctrl->info.flags & reqflags)) {
ret = -EBADRQC;
goto done;
}
data = kmalloc(size, GFP_KERNEL);
if (data == NULL) {
ret = -ENOMEM;
goto done;
}
if (xqry->query == UVC_SET_CUR &&
copy_from_user(data, xqry->data, size)) {
ret = -EFAULT;
goto done;
}
ret = uvc_query_ctrl(chain->dev, xqry->query, xqry->unit,
chain->dev->intfnum, xqry->selector, data, size);
if (ret < 0)
goto done;
if (xqry->query != UVC_SET_CUR &&
copy_to_user(xqry->data, data, size))
ret = -EFAULT;
done:
kfree(data);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
/* --------------------------------------------------------------------------
* Suspend/resume
*/
/*
* Restore control values after resume, skipping controls that haven't been
* changed.
*
* TODO
* - Don't restore modified controls that are back to their default value.
* - Handle restore order (Auto-Exposure Mode should be restored before
* Exposure Time).
*/
int uvc_ctrl_restore_values(struct uvc_device *dev)
{
struct uvc_control *ctrl;
struct uvc_entity *entity;
unsigned int i;
int ret;
/* Walk the entities list and restore controls when possible. */
list_for_each_entry(entity, &dev->entities, list) {
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
printk(KERN_INFO "restoring control %pUl/%u/%u\n",
ctrl->info.entity, ctrl->info.index,
ctrl->info.selector);
ctrl->dirty = 1;
}
ret = uvc_ctrl_commit_entity(dev, entity, 0);
if (ret < 0)
return ret;
}
return 0;
}
/* --------------------------------------------------------------------------
* Control and mapping handling
*/
/*
* Add control information to a given control.
*/
static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
const struct uvc_control_info *info)
{
int ret = 0;
ctrl->info = *info;
INIT_LIST_HEAD(&ctrl->info.mappings);
/* Allocate an array to save control values (cur, def, max, etc.) */
ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1,
GFP_KERNEL);
if (ctrl->uvc_data == NULL) {
ret = -ENOMEM;
goto done;
}
ctrl->initialized = 1;
uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
"entity %u\n", ctrl->info.entity, ctrl->info.selector,
dev->udev->devpath, ctrl->entity->id);
done:
if (ret < 0)
kfree(ctrl->uvc_data);
return ret;
}
/*
* Add a control mapping to a given control.
*/
static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
struct uvc_control_mapping *map;
unsigned int size;
/* Most mappings come from static kernel data and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated;
* this could be optimized.
*/
map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&map->ev_subs);
size = sizeof(*mapping->menu_info) * mapping->menu_count;
map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
if (map->menu_info == NULL) {
kfree(map);
return -ENOMEM;
}
if (map->get == NULL)
map->get = uvc_get_le_value;
if (map->set == NULL)
map->set = uvc_set_le_value;
list_add_tail(&map->list, &ctrl->info.mappings);
uvc_trace(UVC_TRACE_CONTROL,
"Adding mapping '%s' to control %pUl/%u.\n",
map->name, ctrl->info.entity, ctrl->info.selector);
return 0;
}
int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
const struct uvc_control_mapping *mapping)
{
struct uvc_device *dev = chain->dev;
struct uvc_control_mapping *map;
struct uvc_entity *entity;
struct uvc_control *ctrl;
int found = 0;
int ret;
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control "
"id 0x%08x is invalid.\n", mapping->name,
mapping->id);
return -EINVAL;
}
/* Search for the matching (GUID/CS) control on the current chain */
list_for_each_entry(entity, &chain->entities, chain) {
unsigned int i;
if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
!uvc_entity_match_guid(entity, mapping->entity))
continue;
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (ctrl->index == mapping->selector - 1) {
found = 1;
break;
}
}
if (found)
break;
}
if (!found)
return -ENOENT;
if (mutex_lock_interruptible(&chain->ctrl_mutex))
return -ERESTARTSYS;
/* Perform delayed initialization of XU controls */
ret = uvc_ctrl_init_xu_ctrl(dev, ctrl);
if (ret < 0) {
ret = -ENOENT;
goto done;
}
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
"control id 0x%08x already exists.\n",
mapping->name, mapping->id);
ret = -EEXIST;
goto done;
}
}
/* Prevent excess memory consumption */
if (atomic_inc_return(&dev->nmappings) > UVC_MAX_CONTROL_MAPPINGS) {
atomic_dec(&dev->nmappings);
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum "
"mappings count (%u) exceeded.\n", mapping->name,
UVC_MAX_CONTROL_MAPPINGS);
ret = -ENOMEM;
goto done;
}
ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
if (ret < 0)
atomic_dec(&dev->nmappings);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
/*
* Prune an entity of its bogus controls using a blacklist. Bogus controls
* are currently the ones that crash the camera or unconditionally return an
* error when queried.
*/
static void uvc_ctrl_prune_entity(struct uvc_device *dev,
struct uvc_entity *entity)
{
struct uvc_ctrl_blacklist {
struct usb_device_id id;
u8 index;
};
static const struct uvc_ctrl_blacklist processing_blacklist[] = {
{ { USB_DEVICE(0x13d3, 0x509b) }, 9 }, /* Gain */
{ { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */
{ { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */
};
static const struct uvc_ctrl_blacklist camera_blacklist[] = {
{ { USB_DEVICE(0x06f8, 0x3005) }, 9 }, /* Zoom, Absolute */
};
const struct uvc_ctrl_blacklist *blacklist;
unsigned int size;
unsigned int count;
unsigned int i;
u8 *controls;
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_VC_PROCESSING_UNIT:
blacklist = processing_blacklist;
count = ARRAY_SIZE(processing_blacklist);
controls = entity->processing.bmControls;
size = entity->processing.bControlSize;
break;
case UVC_ITT_CAMERA:
blacklist = camera_blacklist;
count = ARRAY_SIZE(camera_blacklist);
controls = entity->camera.bmControls;
size = entity->camera.bControlSize;
break;
default:
return;
}
for (i = 0; i < count; ++i) {
if (!usb_match_one_id(dev->intf, &blacklist[i].id))
continue;
if (blacklist[i].index >= 8 * size ||
!uvc_test_bit(controls, blacklist[i].index))
continue;
uvc_trace(UVC_TRACE_CONTROL, "%u/%u control is black listed, "
"removing it.\n", entity->id, blacklist[i].index);
uvc_clear_bit(controls, blacklist[i].index);
}
}
/*
* Add control information and hardcoded stock control mappings to the given
* device.
*/
static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
{
const struct uvc_control_info *info = uvc_ctrls;
const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
const struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
const struct uvc_control_mapping *mend =
mapping + ARRAY_SIZE(uvc_ctrl_mappings);
/* XU controls initialization requires querying the device for control
* information. As some buggy UVC devices will crash when queried
* repeatedly in a tight loop, delay XU controls initialization until
* first use.
*/
if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
return;
for (; info < iend; ++info) {
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
uvc_ctrl_add_info(dev, ctrl, info);
break;
}
}
if (!ctrl->initialized)
return;
for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
__uvc_ctrl_add_mapping(dev, ctrl, mapping);
}
}
/*
* Initialize device controls.
*/
int uvc_ctrl_init_device(struct uvc_device *dev)
{
struct uvc_entity *entity;
unsigned int i;
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols;
__u8 *bmControls = NULL;
if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
bmControls = entity->extension.bmControls;
bControlSize = entity->extension.bControlSize;
} else if (UVC_ENTITY_TYPE(entity) == UVC_VC_PROCESSING_UNIT) {
bmControls = entity->processing.bmControls;
bControlSize = entity->processing.bControlSize;
} else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) {
bmControls = entity->camera.bmControls;
bControlSize = entity->camera.bControlSize;
}
/* Remove bogus/blacklisted controls */
uvc_ctrl_prune_entity(dev, entity);
/* Count supported controls and allocate the controls array */
ncontrols = memweight(bmControls, bControlSize);
if (ncontrols == 0)
continue;
entity->controls = kcalloc(ncontrols, sizeof(*ctrl),
GFP_KERNEL);
if (entity->controls == NULL)
return -ENOMEM;
entity->ncontrols = ncontrols;
/* Initialize all supported controls */
ctrl = entity->controls;
for (i = 0; i < bControlSize * 8; ++i) {
if (uvc_test_bit(bmControls, i) == 0)
continue;
ctrl->entity = entity;
ctrl->index = i;
uvc_ctrl_init_ctrl(dev, ctrl);
ctrl++;
}
}
return 0;
}
/*
* Cleanup device controls.
*/
static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
struct uvc_control *ctrl)
{
struct uvc_control_mapping *mapping, *nm;
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
kfree(mapping->menu_info);
kfree(mapping);
}
}
void uvc_ctrl_cleanup_device(struct uvc_device *dev)
{
struct uvc_entity *entity;
unsigned int i;
/* Free controls and control mappings for all entities. */
list_for_each_entry(entity, &dev->entities, list) {
for (i = 0; i < entity->ncontrols; ++i) {
struct uvc_control *ctrl = &entity->controls[i];
if (!ctrl->initialized)
continue;
uvc_ctrl_cleanup_mappings(dev, ctrl);
kfree(ctrl->uvc_data);
}
kfree(entity->controls);
}
}
| gpl-2.0 |
chandrusiva/litmus-rt | arch/arm/mach-shmobile/board-lager.c | 2042 | 1401 | /*
* Lager board support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <mach/common.h>
#include <mach/r8a7790.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
static void __init lager_add_standard_devices(void)
{
r8a7790_clock_init();
r8a7790_add_standard_devices();
}
static const char *lager_boards_compat_dt[] __initdata = {
"renesas,lager",
NULL,
};
DT_MACHINE_START(LAGER_DT, "lager")
.init_irq = irqchip_init,
.init_time = r8a7790_timer_init,
.init_machine = lager_add_standard_devices,
.dt_compat = lager_boards_compat_dt,
MACHINE_END
| gpl-2.0 |
lzh6710/ubuntu-trusty | drivers/regulator/pcf50633-regulator.c | 2042 | 3972 | /* NXP PCF50633 PMIC Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <balajirrao@openmoko.org>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte and Andy Green and Werner Almesberger
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/pmic.h>
#define PCF50633_REGULATOR(_name, _id, _min_uV, _uV_step, _min_sel, _n) \
{ \
.name = _name, \
.id = PCF50633_REGULATOR_##_id, \
.ops = &pcf50633_regulator_ops, \
.n_voltages = _n, \
.min_uV = _min_uV, \
.uV_step = _uV_step, \
.linear_min_sel = _min_sel, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = PCF50633_REG_##_id##OUT, \
.vsel_mask = 0xff, \
.enable_reg = PCF50633_REG_##_id##OUT + 1, \
.enable_mask = PCF50633_REGULATOR_ON, \
}
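/*
 * Illustrative example (editorial addition, not part of the original driver):
 * with the regulator_list_voltage_linear() helper used below, a selector at
 * or above linear_min_sel maps to min_uV + (selector - linear_min_sel) *
 * uV_step. For the "auto" regulator defined further down (1800000 uV,
 * 25000 uV step, linear_min_sel 0x2f, 128 selectors) this gives:
 *
 *   selector 0x2f (47)  -> 1800000 uV
 *   selector 0x30 (48)  -> 1825000 uV
 *   selector 0x7f (127) -> 1800000 + 80 * 25000 = 3800000 uV
 */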
static struct regulator_ops pcf50633_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct regulator_desc regulators[] = {
[PCF50633_REGULATOR_AUTO] =
PCF50633_REGULATOR("auto", AUTO, 1800000, 25000, 0x2f, 128),
[PCF50633_REGULATOR_DOWN1] =
PCF50633_REGULATOR("down1", DOWN1, 625000, 25000, 0, 96),
[PCF50633_REGULATOR_DOWN2] =
PCF50633_REGULATOR("down2", DOWN2, 625000, 25000, 0, 96),
[PCF50633_REGULATOR_LDO1] =
PCF50633_REGULATOR("ldo1", LDO1, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO2] =
PCF50633_REGULATOR("ldo2", LDO2, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO3] =
PCF50633_REGULATOR("ldo3", LDO3, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO4] =
PCF50633_REGULATOR("ldo4", LDO4, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO5] =
PCF50633_REGULATOR("ldo5", LDO5, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO6] =
PCF50633_REGULATOR("ldo6", LDO6, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_HCLDO] =
PCF50633_REGULATOR("hcldo", HCLDO, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_MEMLDO] =
PCF50633_REGULATOR("memldo", MEMLDO, 900000, 100000, 0, 28),
};
static int pcf50633_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
struct pcf50633 *pcf;
struct regulator_config config = { };
/* Already set by core driver */
pcf = dev_to_pcf50633(pdev->dev.parent);
config.dev = &pdev->dev;
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcf;
config.regmap = pcf->regmap;
rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
&config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
platform_set_drvdata(pdev, rdev);
if (pcf->pdata->regulator_registered)
pcf->pdata->regulator_registered(pcf, pdev->id);
return 0;
}
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
.name = "pcf50633-regulator",
},
.probe = pcf50633_regulator_probe,
};
static int __init pcf50633_regulator_init(void)
{
return platform_driver_register(&pcf50633_regulator_driver);
}
subsys_initcall(pcf50633_regulator_init);
static void __exit pcf50633_regulator_exit(void)
{
platform_driver_unregister(&pcf50633_regulator_driver);
}
module_exit(pcf50633_regulator_exit);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-regulator");
| gpl-2.0 |
utilite-computer/linux-kernel-3.0 | arch/arm/mach-at91/board-usb-a9260.c | 2298 | 5306 | /*
* linux/arch/arm/mach-at91/board-usb-a9260.c
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2006 Atmel
* Copyright (C) 2007 Calao-systems
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/clk.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include "sam9_smc.h"
#include "generic.h"
static void __init ek_init_early(void)
{
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init ek_init_irq(void)
{
at91sam9260_init_interrupts(NULL);
}
/*
* USB Host port
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
};
/*
* USB Device port
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
.pullup_pin = 0, /* pull-up driven by UDC */
};
/*
* MACB Ethernet device
*/
static struct at91_eth_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA31,
.is_rmii = 1,
};
/*
* NAND flash
*/
static struct mtd_partition __initdata ek_nand_partition[] = {
{
.name = "Uboot & Kernel",
.offset = 0,
.size = SZ_16M,
},
{
.name = "Root FS",
.offset = MTDPART_OFS_NXTBLK,
.size = 120 * SZ_1M,
},
{
.name = "FS",
.offset = MTDPART_OFS_NXTBLK,
.size = 120 * SZ_1M,
}
};
static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
{
*num_partitions = ARRAY_SIZE(ek_nand_partition);
return ek_nand_partition;
}
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
// .det_pin = ... not connected
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.partition_info = nand_partitions,
};
static struct sam9_smc_config __initdata ek_nand_smc_config = {
.ncs_read_setup = 0,
.nrd_setup = 1,
.ncs_write_setup = 0,
.nwe_setup = 1,
.ncs_read_pulse = 3,
.nrd_pulse = 3,
.ncs_write_pulse = 3,
.nwe_pulse = 3,
.read_cycle = 5,
.write_cycle = 5,
.mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
.tdf_cycles = 2,
};
static void __init ek_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
sam9_smc_configure(3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
/*
* GPIO Buttons
*/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button ek_buttons[] = {
{ /* USER PUSH BUTTON */
.code = KEY_ENTER,
.gpio = AT91_PIN_PB10,
.active_low = 1,
.desc = "user_pb",
.wakeup = 1,
}
};
static struct gpio_keys_platform_data ek_button_data = {
.buttons = ek_buttons,
.nbuttons = ARRAY_SIZE(ek_buttons),
};
static struct platform_device ek_button_device = {
.name = "gpio-keys",
.id = -1,
.num_resources = 0,
.dev = {
.platform_data = &ek_button_data,
}
};
static void __init ek_add_device_buttons(void)
{
at91_set_GPIO_periph(AT91_PIN_PB10, 1); /* user push button, pull up enabled */
at91_set_deglitch(AT91_PIN_PB10, 1);
platform_device_register(&ek_button_device);
}
#else
static void __init ek_add_device_buttons(void) {}
#endif
/*
* LEDs
*/
static struct gpio_led ek_leds[] = {
{ /* user_led (green) */
.name = "user_led",
.gpio = AT91_PIN_PB21,
.active_low = 0,
.default_trigger = "heartbeat",
}
};
static void __init ek_board_init(void)
{
/* Serial */
at91_add_device_serial();
/* USB Host */
at91_add_device_usbh(&ek_usbh_data);
/* USB Device */
at91_add_device_udc(&ek_udc_data);
/* NAND */
ek_add_device_nand();
/* I2C */
at91_add_device_i2c(NULL, 0);
/* Ethernet */
at91_add_device_eth(&ek_macb_data);
/* Push Buttons */
ek_add_device_buttons();
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
/* shutdown controller, wakeup button (5 msec low) */
at91_sys_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10) | AT91_SHDW_WKMODE0_LOW
| AT91_SHDW_RTTWKEN);
}
MACHINE_START(USB_A9260, "CALAO USB_A9260")
/* Maintainer: calao-systems */
.timer = &at91sam926x_timer,
.map_io = at91sam9260_map_io,
.init_early = ek_init_early,
.init_irq = ek_init_irq,
.init_machine = ek_board_init,
MACHINE_END
| gpl-2.0 |
javelinanddart/shamu | drivers/staging/bcm/led_control.c | 2298 | 26808 | #include "headers.h"
#define STATUS_IMAGE_CHECKSUM_MISMATCH -199
#define EVENT_SIGNALED 1
static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
{
B_UINT16 u16CheckSum = 0;
while (u32Size--) {
u16CheckSum += (B_UINT8)~(*pu8Buffer);
pu8Buffer++;
}
return u16CheckSum;
}
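/*
 * Illustrative example (editorial addition): the checksum is the 16-bit sum
 * of the one's complement of each byte. For an assumed two-byte buffer
 * { 0x01, 0x02 }:
 *
 *   (B_UINT8)~0x01 + (B_UINT8)~0x02 = 0xFE + 0xFD = 0x01FB
 */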
BOOLEAN IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios)
{
INT Status;
Status = (Adapter->gpioBitMap & gpios) ^ gpios;
if (Status)
return FALSE;
else
return TRUE;
}
static INT LED_Blink(struct bcm_mini_adapter *Adapter, UINT GPIO_Num, UCHAR uiLedIndex,
ULONG timeout, INT num_of_time, enum bcm_led_events currdriverstate)
{
int Status = STATUS_SUCCESS;
BOOLEAN bInfinite = FALSE;
/* If num_of_time is negative, blink the LED in an infinite loop */
if (num_of_time < 0) {
bInfinite = TRUE;
num_of_time = 1;
}
while (num_of_time) {
if (currdriverstate == Adapter->DriverState)
TURN_ON_LED(GPIO_Num, uiLedIndex);
/* Wait for timeout after setting on the LED */
Status = wait_event_interruptible_timeout(
Adapter->LEDInfo.notify_led_event,
currdriverstate != Adapter->DriverState ||
kthread_should_stop(),
msecs_to_jiffies(timeout));
if (kthread_should_stop()) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"Led thread got signal to exit..hence exiting");
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_DISABLED;
TURN_OFF_LED(GPIO_Num, uiLedIndex);
Status = EVENT_SIGNALED;
break;
}
if (Status) {
TURN_OFF_LED(GPIO_Num, uiLedIndex);
Status = EVENT_SIGNALED;
break;
}
TURN_OFF_LED(GPIO_Num, uiLedIndex);
Status = wait_event_interruptible_timeout(
Adapter->LEDInfo.notify_led_event,
currdriverstate != Adapter->DriverState ||
kthread_should_stop(),
msecs_to_jiffies(timeout));
if (bInfinite == FALSE)
num_of_time--;
}
return Status;
}
static INT ScaleRateofTransfer(ULONG rate)
{
if (rate <= 3)
return rate;
else if ((rate > 3) && (rate <= 100))
return 5;
else if ((rate > 100) && (rate <= 200))
return 6;
else if ((rate > 200) && (rate <= 300))
return 7;
else if ((rate > 300) && (rate <= 400))
return 8;
else if ((rate > 400) && (rate <= 500))
return 9;
else if ((rate > 500) && (rate <= 600))
return 10;
else
return MAX_NUM_OF_BLINKS;
}
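/*
 * Illustrative mapping (editorial addition): an assumed measured rate of 250
 * packets per second falls in the (200, 300] bucket and therefore scales to
 * 7 blinks; anything above 600 packets per second is capped at
 * MAX_NUM_OF_BLINKS.
 */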
static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, UCHAR GPIO_Num_tx,
UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex,
enum bcm_led_events currdriverstate)
{
/* Initial values of TX and RX packets */
ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
/* values of TX and RX packets after 1 sec */
ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
/* Rate of transfer of Tx and Rx in 1 sec */
ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
int Status = STATUS_SUCCESS;
INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
UINT remDelay = 0;
BOOLEAN bBlinkBothLED = TRUE;
/* UINT GPIO_num = DISABLE_GPIO_NUM; */
ulong timeout = 0;
/* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
/* Scale the rate of transfer to no of blinks. */
num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
while ((Adapter->device_removed == FALSE)) {
timeout = 50;
/*
* Blink the Tx and Rx LEDs when both Tx and Rx are
* within normal bandwidth
*/
if (bBlinkBothLED) {
/*
* Assign minimum number of blinks of
* either Tx or Rx.
*/
if (num_of_time_tx > num_of_time_rx)
num_of_time = num_of_time_rx;
else
num_of_time = num_of_time_tx;
if (num_of_time > 0) {
/* Blink both Tx and Rx LEDs */
if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
uiTxLedIndex, timeout,
num_of_time, currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
uiRxLedIndex, timeout,
num_of_time, currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
}
if (num_of_time == num_of_time_tx) {
/* Blink pending rate of Rx */
if (LED_Blink(Adapter, (1 << GPIO_Num_rx),
uiRxLedIndex, timeout,
num_of_time_rx-num_of_time,
currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
num_of_time = num_of_time_rx;
} else {
/* Blink pending rate of Tx */
if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
uiTxLedIndex, timeout,
num_of_time_tx-num_of_time,
currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
num_of_time = num_of_time_tx;
}
} else {
if (num_of_time == num_of_time_tx) {
/* Blink pending rate of Rx */
if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
uiTxLedIndex, timeout,
num_of_time, currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
} else {
/* Blink pending rate of Tx */
if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
uiRxLedIndex, timeout,
num_of_time, currdriverstate)
== EVENT_SIGNALED)
return EVENT_SIGNALED;
}
}
/*
* If the Tx/Rx rate is less than the maximum blinks per second,
* wait until the delay completes the full one-second period
*/
remDelay = MAX_NUM_OF_BLINKS - num_of_time;
if (remDelay > 0) {
timeout = 100 * remDelay;
Status = wait_event_interruptible_timeout(
Adapter->LEDInfo.notify_led_event,
currdriverstate != Adapter->DriverState
|| kthread_should_stop(),
msecs_to_jiffies(timeout));
if (kthread_should_stop()) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
LED_DUMP_INFO, DBG_LVL_ALL,
"Led thread got signal to exit..hence exiting");
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_DISABLED;
return EVENT_SIGNALED;
}
if (Status)
return EVENT_SIGNALED;
}
/* Turn off both Tx and Rx LEDs before next second */
TURN_OFF_LED(1 << GPIO_Num_tx, uiTxLedIndex);
TURN_OFF_LED(1 << GPIO_Num_rx, uiRxLedIndex);
/*
* Read the Tx & Rx packet counts after 1 second and
* calculate the rate of transfer
*/
Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
rate_of_transfer_tx = Final_num_of_packts_tx -
Initial_num_of_packts_tx;
rate_of_transfer_rx = Final_num_of_packts_rx -
Initial_num_of_packts_rx;
/* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Final_num_of_packts_tx;
Initial_num_of_packts_rx = Final_num_of_packts_rx;
/* Scale the rate of transfer to no of blinks. */
num_of_time_tx =
ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
num_of_time_rx =
ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
}
return Status;
}
/*
* -----------------------------------------------------------------------------
* Procedure: ValidateDSDParamsChecksum
*
* Description: Reads DSD Params and validates checksum.
*
* Arguments:
* Adapter - Pointer to Adapter structure.
* ulParamOffset - Start offset of the DSD parameter to be read and
* validated.
* usParamLen - Length of the DSD Parameter.
*
* Returns:
* <OSAL_STATUS_CODE>
* -----------------------------------------------------------------------------
*/
static INT ValidateDSDParamsChecksum(struct bcm_mini_adapter *Adapter, ULONG ulParamOffset,
USHORT usParamLen)
{
INT Status = STATUS_SUCCESS;
PUCHAR puBuffer = NULL;
USHORT usChksmOrg = 0;
USHORT usChecksumCalculated = 0;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
ulParamOffset, usParamLen);
puBuffer = kmalloc(usParamLen, GFP_KERNEL);
if (!puBuffer) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"LED Thread: ValidateDSDParamsChecksum Allocation failed");
return -ENOMEM;
}
/* Read the DSD data from the parameter offset. */
if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
ulParamOffset, usParamLen)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
/* Calculate the checksum of the data read from the DSD parameter. */
usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread: usCheckSumCalculated = 0x%x\n",
usChecksumCalculated);
/*
* End of the DSD parameter will have a TWO bytes checksum stored in it.
* Read it and compare with the calculated Checksum.
*/
if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
ulParamOffset+usParamLen, 2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
usChksmOrg = ntohs(usChksmOrg);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread: usChksmOrg = 0x%x", usChksmOrg);
/*
* Compare the checksum calculated with the checksum read
* from DSD section
*/
if (usChecksumCalculated ^ usChksmOrg) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
exit:
kfree(puBuffer);
return Status;
}
/*
* -----------------------------------------------------------------------------
* Procedure: ValidateHWParmStructure
*
* Description: Validates HW Parameters.
*
* Arguments:
* Adapter - Pointer to Adapter structure.
* ulHwParamOffset - Start offset of the HW parameter Section to be read
* and validated.
*
* Returns:
* <OSAL_STATUS_CODE>
* -----------------------------------------------------------------------------
*/
static INT ValidateHWParmStructure(struct bcm_mini_adapter *Adapter, ULONG ulHwParamOffset)
{
INT Status = STATUS_SUCCESS;
USHORT HwParamLen = 0;
/*
* Add DSD start offset to the hwParamOffset to get
* the actual address.
*/
ulHwParamOffset += DSD_START_OFFSET;
/* Read the Length of HW_PARAM structure */
BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
HwParamLen = ntohs(HwParamLen);
if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
return STATUS_IMAGE_CHECKSUM_MISMATCH;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread:HwParamLen = 0x%x", HwParamLen);
Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
HwParamLen);
return Status;
} /* ValidateHWParmStructure() */
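/*
 * ReadLEDInformationFromEEPROM: locates the HW Parameter section in the
 * EEPROM/NVM (directly or via the MAP5 compatibility section), validates it,
 * then reads the 32-entry GPIO table and records which GPIO pin drives each
 * LED colour in GPIO_Array[] and Adapter->gpioBitMap.
 */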
static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter,
UCHAR GPIO_Array[])
{
int Status = STATUS_SUCCESS;
ULONG dwReadValue = 0;
USHORT usHwParamData = 0;
USHORT usEEPROMVersion = 0;
UCHAR ucIndex = 0;
UCHAR ucGPIOInfo[32] = {0};
BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
EEPROM_VERSION_OFFSET, 2);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"usEEPROMVersion: Minor:0x%X Major:0x%x",
usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
usHwParamData = ntohs(usHwParamData);
dwReadValue = usHwParamData;
} else {
/*
* Validate Compatibility section and then read HW param
* if compatibility section is valid.
*/
Status = ValidateDSDParamsChecksum(Adapter,
DSD_START_OFFSET,
COMPATIBILITY_SECTION_LENGTH_MAP5);
if (Status != STATUS_SUCCESS)
return Status;
BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
dwReadValue = ntohl(dwReadValue);
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread: Start address of HW_PARAM structure = 0x%lx",
dwReadValue);
/*
* Validate if the address read out is within the DSD.
* Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
* lower limit should be above DSD_START_OFFSET and
* upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
*/
if (dwReadValue < DSD_START_OFFSET ||
dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
return STATUS_IMAGE_CHECKSUM_MISMATCH;
Status = ValidateHWParmStructure(Adapter, dwReadValue);
if (Status)
return Status;
/*
* Add DSD_START_OFFSET to the offset read from the EEPROM.
* This will give the actual start address of the HW Parameters section.
* To read the GPIO section, add the GPIO offset further.
*/
dwReadValue +=
DSD_START_OFFSET; /* = start address of hw param section. */
dwReadValue += GPIO_SECTION_START_OFFSET;
/* = GPIO start offset within HW Param section. */
/*
* Read the GPIO values for 32 GPIOs from EEPROM and map the function
* number to GPIO pin number to GPIO_Array
*/
BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
for (ucIndex = 0; ucIndex < 32; ucIndex++) {
switch (ucGPIOInfo[ucIndex]) {
case RED_LED:
GPIO_Array[RED_LED] = ucIndex;
Adapter->gpioBitMap |= (1 << ucIndex);
break;
case BLUE_LED:
GPIO_Array[BLUE_LED] = ucIndex;
Adapter->gpioBitMap |= (1 << ucIndex);
break;
case YELLOW_LED:
GPIO_Array[YELLOW_LED] = ucIndex;
Adapter->gpioBitMap |= (1 << ucIndex);
break;
case GREEN_LED:
GPIO_Array[GREEN_LED] = ucIndex;
Adapter->gpioBitMap |= (1 << ucIndex);
break;
default:
break;
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"GPIO's bit map correspond to LED :0x%X", Adapter->gpioBitMap);
return Status;
}
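/*
 * ReadConfigFileStructure: combines the LED types from the target params
 * (HostDrvrConfig1..3) with the GPIO numbers read from EEPROM and decides,
 * via bEnableThread, whether the LED control thread needs to run at all.
 */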
static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter,
BOOLEAN *bEnableThread)
{
int Status = STATUS_SUCCESS;
/* Array to store GPIO numbers from EEPROM */
UCHAR GPIO_Array[NUM_OF_LEDS+1];
UINT uiIndex = 0;
UINT uiNum_of_LED_Type = 0;
PUCHAR puCFGData = NULL;
UCHAR bData = 0;
memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "Target Params not Avail.\n");
return -ENOENT;
}
/* Populate GPIO_Array with GPIO numbers for LED functions */
/* Read the GPIO numbers from EEPROM */
Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
*bEnableThread = FALSE;
return STATUS_SUCCESS;
} else if (Status) {
*bEnableThread = FALSE;
return Status;
}
/*
* CONFIG file read successfully. Deallocate the memory of
* uiFileNameBufferSize
*/
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
"LED Thread: Config file read successfully\n");
puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
/*
* Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
* will have the information of LED type, LED on state for different
* driver state and LED blink state.
*/
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
bData = *puCFGData;
/*
* Check Bit 8 for polarity. If it is set,
* polarity is reverse polarity
*/
if (bData & 0x80) {
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 0;
/* unset the bit 8 */
bData = bData & 0x7f;
}
Adapter->LEDInfo.LEDState[uiIndex].LED_Type = bData;
if (bData <= NUM_OF_LEDS)
Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
GPIO_Array[bData];
else
Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
DISABLE_GPIO_NUM;
puCFGData++;
bData = *puCFGData;
Adapter->LEDInfo.LEDState[uiIndex].LED_On_State = bData;
puCFGData++;
bData = *puCFGData;
Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State = bData;
puCFGData++;
}
/*
* Check if all the LED settings are disabled. If they are all disabled,
* don't launch the LED control thread.
*/
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if ((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
(Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
(Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0))
uiNum_of_LED_Type++;
}
if (uiNum_of_LED_Type >= NUM_OF_LEDS)
*bEnableThread = FALSE;
return Status;
}
/*
* -----------------------------------------------------------------------------
* Procedure: LedGpioInit
*
* Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode
* and make the initial state to be OFF.
*
* Arguments:
* Adapter - Pointer to MINI_ADAPTER structure.
*
* Returns: VOID
*
* -----------------------------------------------------------------------------
*/
static VOID LedGpioInit(struct bcm_mini_adapter *Adapter)
{
UINT uiResetValue = 0;
UINT uiIndex = 0;
/* Set all LED GPIO Mode to output mode */
if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
sizeof(uiResetValue)) < 0)
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "LED Thread: RDM Failed\n");
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
DISABLE_GPIO_NUM)
uiResetValue |= (1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
TURN_OFF_LED(1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,
uiIndex);
}
if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
sizeof(uiResetValue)) < 0)
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "LED Thread: WRM Failed\n");
Adapter->LEDInfo.bIdle_led_off = FALSE;
}
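/*
 * BcmGetGPIOPinInfo: for the given driver state, returns the GPIO number(s)
 * and LED index(es) to drive. Blink settings are used for NORMAL_OPERATION,
 * IDLEMODE_EXIT and FW_DOWNLOAD; the LED-on settings are used otherwise.
 */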
static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter, UCHAR *GPIO_num_tx,
UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,
enum bcm_led_events currdriverstate)
{
UINT uiIndex = 0;
*GPIO_num_tx = DISABLE_GPIO_NUM;
*GPIO_num_rx = DISABLE_GPIO_NUM;
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if ((currdriverstate == NORMAL_OPERATION) ||
(currdriverstate == IDLEMODE_EXIT) ||
(currdriverstate == FW_DOWNLOAD)) {
if (Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State &
currdriverstate) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
!= DISABLE_GPIO_NUM) {
if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
} else {
*GPIO_num_rx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedRxIndex = uiIndex;
}
}
}
} else {
if (Adapter->LEDInfo.LEDState[uiIndex].LED_On_State
& currdriverstate) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
!= DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
}
}
}
}
return STATUS_SUCCESS;
}
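/*
 * LEDControlThread: body of the LED kernel thread. It sleeps until the
 * driver state changes and then drives the configured LEDs for the new
 * state - solid on, blinking, or proportional to Tx/Rx traffic.
 */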
static VOID LEDControlThread(struct bcm_mini_adapter *Adapter)
{
UINT uiIndex = 0;
UCHAR GPIO_num = 0;
UCHAR uiLedIndex = 0;
UINT uiResetValue = 0;
enum bcm_led_events currdriverstate = 0;
ulong timeout = 0;
INT Status = 0;
UCHAR dummyGPIONum = 0;
UCHAR dummyIndex = 0;
/* currdriverstate = Adapter->DriverState; */
Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE;
/*
* Wait till event is triggered
*
* wait_event(Adapter->LEDInfo.notify_led_event,
* currdriverstate!= Adapter->DriverState);
*/
GPIO_num = DISABLE_GPIO_NUM;
while (TRUE) {
/* Wait till event is triggered */
if ((GPIO_num == DISABLE_GPIO_NUM)
||
((currdriverstate != FW_DOWNLOAD) &&
(currdriverstate != NORMAL_OPERATION) &&
(currdriverstate != LOWPOWER_MODE_ENTER))
||
(currdriverstate == LED_THREAD_INACTIVE))
Status = wait_event_interruptible(
Adapter->LEDInfo.notify_led_event,
currdriverstate != Adapter->DriverState
|| kthread_should_stop());
if (kthread_should_stop() || Adapter->device_removed) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"Led thread got signal to exit..hence exiting");
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_DISABLED;
TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
return; /* STATUS_FAILURE; */
}
if (GPIO_num != DISABLE_GPIO_NUM)
TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
if (Adapter->LEDInfo.bLedInitDone == FALSE) {
LedGpioInit(Adapter);
Adapter->LEDInfo.bLedInitDone = TRUE;
}
switch (Adapter->DriverState) {
case DRIVER_INIT:
currdriverstate = DRIVER_INIT;
/* Adapter->DriverState; */
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
&uiLedIndex, &dummyIndex, currdriverstate);
if (GPIO_num != DISABLE_GPIO_NUM)
TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
case FW_DOWNLOAD:
/*
* BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
* LED_DUMP_INFO, DBG_LVL_ALL,
* "LED Thread: FW_DN_DONE called\n");
*/
currdriverstate = FW_DOWNLOAD;
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
&uiLedIndex, &dummyIndex, currdriverstate);
if (GPIO_num != DISABLE_GPIO_NUM) {
timeout = 50;
LED_Blink(Adapter, 1 << GPIO_num, uiLedIndex,
timeout, -1, currdriverstate);
}
break;
case FW_DOWNLOAD_DONE:
currdriverstate = FW_DOWNLOAD_DONE;
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
&uiLedIndex, &dummyIndex, currdriverstate);
if (GPIO_num != DISABLE_GPIO_NUM)
TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
case SHUTDOWN_EXIT:
/*
* no break, continue to NO_NETWORK_ENTRY
* state as well.
*/
case NO_NETWORK_ENTRY:
currdriverstate = NO_NETWORK_ENTRY;
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
&uiLedIndex, &dummyIndex, currdriverstate);
if (GPIO_num != DISABLE_GPIO_NUM)
TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
case NORMAL_OPERATION:
{
UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
UCHAR uiLEDTx = 0;
UCHAR uiLEDRx = 0;
currdriverstate = NORMAL_OPERATION;
Adapter->LEDInfo.bIdle_led_off = FALSE;
BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx,
&GPIO_num_rx, &uiLEDTx, &uiLEDRx,
currdriverstate);
if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
(GPIO_num_rx ==
DISABLE_GPIO_NUM)) {
GPIO_num = DISABLE_GPIO_NUM;
} else {
/*
* If single LED is selected, use same
* for both Tx and Rx
*/
if (GPIO_num_tx == DISABLE_GPIO_NUM) {
GPIO_num_tx = GPIO_num_rx;
uiLEDTx = uiLEDRx;
} else if (GPIO_num_rx ==
DISABLE_GPIO_NUM) {
GPIO_num_rx = GPIO_num_tx;
uiLEDRx = uiLEDTx;
}
/*
* Blink the LED in proportionate
* to Tx and Rx transmissions.
*/
LED_Proportional_Blink(Adapter,
GPIO_num_tx, uiLEDTx,
GPIO_num_rx, uiLEDRx,
currdriverstate);
}
}
break;
case LOWPOWER_MODE_ENTER:
currdriverstate = LOWPOWER_MODE_ENTER;
if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
Adapter->ulPowerSaveMode) {
/* Turn OFF all the LED */
uiResetValue = 0;
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
}
/* Turn off LED and wake up for sending the IDLE mode ACK */
Adapter->LEDInfo.bLedInitDone = FALSE;
Adapter->LEDInfo.bIdle_led_off = TRUE;
wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
GPIO_num = DISABLE_GPIO_NUM;
break;
case IDLEMODE_CONTINUE:
currdriverstate = IDLEMODE_CONTINUE;
GPIO_num = DISABLE_GPIO_NUM;
break;
case IDLEMODE_EXIT:
break;
case DRIVER_HALT:
currdriverstate = DRIVER_HALT;
GPIO_num = DISABLE_GPIO_NUM;
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
!= DISABLE_GPIO_NUM)
TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
/* Adapter->DriverState = DRIVER_INIT; */
break;
case LED_THREAD_INACTIVE:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "InActivating LED thread...");
currdriverstate = LED_THREAD_INACTIVE;
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_RUNNING_INACTIVELY;
Adapter->LEDInfo.bLedInitDone = FALSE;
/* disable ALL LED */
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
!= DISABLE_GPIO_NUM)
TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
break;
case LED_THREAD_ACTIVE:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL, "Activating LED thread again...");
if (Adapter->LinkUpStatus == FALSE)
Adapter->DriverState = NO_NETWORK_ENTRY;
else
Adapter->DriverState = NORMAL_OPERATION;
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_RUNNING_ACTIVELY;
break;
/* return; */
default:
break;
}
}
Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
}
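/*
 * InitLedSettings: reads the LED configuration and, depending on whether any
 * usable LED was found, spawns the LED control thread or shuts it down.
 */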
int InitLedSettings(struct bcm_mini_adapter *Adapter)
{
int Status = STATUS_SUCCESS;
BOOLEAN bEnableThread = TRUE;
UCHAR uiIndex = 0;
/*
* Initially set BitPolarity to normal polarity. The bit 8 of LED type
* is used to change the polarity of the LED.
*/
for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
/*
* Read the LED settings of CONFIG file and map it
* to GPIO numbers in EEPROM
*/
Status = ReadConfigFileStructure(Adapter, &bEnableThread);
if (STATUS_SUCCESS != Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"LED Thread: FAILED in ReadConfigFileStructure\n");
return Status;
}
if (Adapter->LEDInfo.led_thread_running) {
if (!bEnableThread) {
Adapter->DriverState = DRIVER_HALT;
wake_up(&Adapter->LEDInfo.notify_led_event);
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_DISABLED;
}
} else if (bEnableThread) {
/* Create secondary thread to handle the LEDs */
init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_RUNNING_ACTIVELY;
Adapter->LEDInfo.bIdle_led_off = FALSE;
Adapter->LEDInfo.led_cntrl_threadid =
kthread_run((int (*)(void *)) LEDControlThread,
Adapter, "led_control_thread");
if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
DBG_LVL_ALL,
"Not able to spawn Kernel Thread\n");
Adapter->LEDInfo.led_thread_running =
BCM_LED_THREAD_DISABLED;
return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
}
}
return Status;
}
| gpl-2.0 |
tiagormk/linux-vitamins-odroidxu | arch/mips/alchemy/devboards/db1235.c | 2298 | 2081 | /*
* DB1200/PB1200 / DB1550 / DB1300 board support.
*
* These 4 boards can reliably be supported in a single kernel image.
*/
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
int __init db1200_board_setup(void);
int __init db1200_dev_setup(void);
int __init db1300_board_setup(void);
int __init db1300_dev_setup(void);
int __init db1550_board_setup(void);
int __init db1550_dev_setup(void);
int __init db1550_pci_setup(int);
static const char *board_type_str(void)
{
switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
case BCSR_WHOAMI_PB1200_DDR1:
case BCSR_WHOAMI_PB1200_DDR2:
return "PB1200";
case BCSR_WHOAMI_DB1200:
return "DB1200";
case BCSR_WHOAMI_DB1300:
return "DB1300";
case BCSR_WHOAMI_DB1550:
return "DB1550";
case BCSR_WHOAMI_PB1550_SDR:
case BCSR_WHOAMI_PB1550_DDR:
return "PB1550";
default:
return "(unknown)";
}
}
const char *get_system_type(void)
{
return board_type_str();
}
void __init board_setup(void)
{
int ret;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1550:
ret = db1550_board_setup();
break;
case ALCHEMY_CPU_AU1200:
ret = db1200_board_setup();
break;
case ALCHEMY_CPU_AU1300:
ret = db1300_board_setup();
break;
default:
pr_err("unsupported CPU on board\n");
ret = -ENODEV;
}
if (ret)
panic("cannot initialize board support\n");
}
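/* PCI is wired up only on the DB1550/PB1550 boards; set it up just for them. */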
int __init db1235_arch_init(void)
{
int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
if (id == BCSR_WHOAMI_DB1550)
return db1550_pci_setup(0);
else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
(id == BCSR_WHOAMI_PB1550_DDR))
return db1550_pci_setup(1);
return 0;
}
arch_initcall(db1235_arch_init);
int __init db1235_dev_init(void)
{
switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
case BCSR_WHOAMI_PB1200_DDR1:
case BCSR_WHOAMI_PB1200_DDR2:
case BCSR_WHOAMI_DB1200:
return db1200_dev_setup();
case BCSR_WHOAMI_DB1300:
return db1300_dev_setup();
case BCSR_WHOAMI_DB1550:
case BCSR_WHOAMI_PB1550_SDR:
case BCSR_WHOAMI_PB1550_DDR:
return db1550_dev_setup();
}
return 0;
}
device_initcall(db1235_dev_init);
| gpl-2.0 |
shminer/android_kernel_motorola_olympus-jz | arch/arm/mach-bcmring/irq.c | 2810 | 3674 | /*
*
* Copyright (C) 1999 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/version.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/csp/intcHw_reg.h>
#include <mach/csp/mm_io.h>
static void bcmring_mask_irq0(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_INTC0_START),
MM_IO_BASE_INTC0 + INTCHW_INTENCLEAR);
}
static void bcmring_unmask_irq0(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_INTC0_START),
MM_IO_BASE_INTC0 + INTCHW_INTENABLE);
}
static void bcmring_mask_irq1(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_INTC1_START),
MM_IO_BASE_INTC1 + INTCHW_INTENCLEAR);
}
static void bcmring_unmask_irq1(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_INTC1_START),
MM_IO_BASE_INTC1 + INTCHW_INTENABLE);
}
static void bcmring_mask_irq2(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_SINTC_START),
MM_IO_BASE_SINTC + INTCHW_INTENCLEAR);
}
static void bcmring_unmask_irq2(struct irq_data *d)
{
writel(1 << (d->irq - IRQ_SINTC_START),
MM_IO_BASE_SINTC + INTCHW_INTENABLE);
}
static struct irq_chip bcmring_irq0_chip = {
.name = "ARM-INTC0",
.irq_ack = bcmring_mask_irq0,
.irq_mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */
.irq_unmask = bcmring_unmask_irq0, /* unmask an interrupt */
};
static struct irq_chip bcmring_irq1_chip = {
.name = "ARM-INTC1",
.irq_ack = bcmring_mask_irq1,
.irq_mask = bcmring_mask_irq1,
.irq_unmask = bcmring_unmask_irq1,
};
static struct irq_chip bcmring_irq2_chip = {
.name = "ARM-SINTC",
.irq_ack = bcmring_mask_irq2,
.irq_mask = bcmring_mask_irq2,
.irq_unmask = bcmring_unmask_irq2,
};
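/*
 * vic_init: wire up one interrupt controller bank - set the chip and a level
 * handler for every valid source, then mask and clear everything in hardware.
 */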
static void vic_init(void __iomem *base, struct irq_chip *chip,
unsigned int irq_start, unsigned int vic_sources)
{
unsigned int i;
for (i = 0; i < 32; i++) {
unsigned int irq = irq_start + i;
irq_set_chip(irq, chip);
irq_set_chip_data(irq, base);
if (vic_sources & (1 << i)) {
irq_set_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
writel(0, base + INTCHW_INTSELECT);
writel(0, base + INTCHW_INTENABLE);
writel(~0, base + INTCHW_INTENCLEAR);
writel(0, base + INTCHW_IRQSTATUS);
writel(~0, base + INTCHW_SOFTINTCLEAR);
}
void __init bcmring_init_irq(void)
{
vic_init((void __iomem *)MM_IO_BASE_INTC0, &bcmring_irq0_chip,
IRQ_INTC0_START, IRQ_INTC0_VALID_MASK);
vic_init((void __iomem *)MM_IO_BASE_INTC1, &bcmring_irq1_chip,
IRQ_INTC1_START, IRQ_INTC1_VALID_MASK);
vic_init((void __iomem *)MM_IO_BASE_SINTC, &bcmring_irq2_chip,
IRQ_SINTC_START, IRQ_SINTC_VALID_MASK);
/* special cases */
if (INTCHW_INTC1_GPIO0 & IRQ_INTC1_VALID_MASK) {
irq_set_handler(IRQ_GPIO0, handle_simple_irq);
}
if (INTCHW_INTC1_GPIO1 & IRQ_INTC1_VALID_MASK) {
irq_set_handler(IRQ_GPIO1, handle_simple_irq);
}
}
| gpl-2.0 |
neldar/kernel-2.6.32.9-i9000-bln-only | sound/synth/emux/emux_oss.c | 4602 | 12022 | /*
* Interface for OSS sequencer emulation
*
* Copyright (C) 1999 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Changes
* 19990227 Steve Ratcliffe Made separate file and merged in latest
* midi emulation.
*/
#ifdef CONFIG_SND_SEQUENCER_OSS
#include <asm/uaccess.h>
#include <sound/core.h>
#include "emux_voice.h"
#include <sound/asoundef.h>
static int snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure);
static int snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg);
static int snd_emux_ioctl_seq_oss(struct snd_seq_oss_arg *arg, unsigned int cmd,
unsigned long ioarg);
static int snd_emux_load_patch_seq_oss(struct snd_seq_oss_arg *arg, int format,
const char __user *buf, int offs, int count);
static int snd_emux_reset_seq_oss(struct snd_seq_oss_arg *arg);
static int snd_emux_event_oss_input(struct snd_seq_event *ev, int direct,
void *private, int atomic, int hop);
static void reset_port_mode(struct snd_emux_port *port, int midi_mode);
static void emuspec_control(struct snd_emux *emu, struct snd_emux_port *port,
int cmd, unsigned char *event, int atomic, int hop);
static void gusspec_control(struct snd_emux *emu, struct snd_emux_port *port,
int cmd, unsigned char *event, int atomic, int hop);
static void fake_event(struct snd_emux *emu, struct snd_emux_port *port,
int ch, int param, int val, int atomic, int hop);
/* operators */
static struct snd_seq_oss_callback oss_callback = {
.owner = THIS_MODULE,
.open = snd_emux_open_seq_oss,
.close = snd_emux_close_seq_oss,
.ioctl = snd_emux_ioctl_seq_oss,
.load_patch = snd_emux_load_patch_seq_oss,
.reset = snd_emux_reset_seq_oss,
};
/*
* register OSS synth
*/
void
snd_emux_init_seq_oss(struct snd_emux *emu)
{
struct snd_seq_oss_reg *arg;
struct snd_seq_device *dev;
if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
sizeof(struct snd_seq_oss_reg), &dev) < 0)
return;
emu->oss_synth = dev;
strcpy(dev->name, emu->name);
arg = SNDRV_SEQ_DEVICE_ARGPTR(dev);
arg->type = SYNTH_TYPE_SAMPLE;
arg->subtype = SAMPLE_TYPE_AWE32;
arg->nvoices = emu->max_voices;
arg->oper = oss_callback;
arg->private_data = emu;
/* register to OSS synth table */
snd_device_register(emu->card, dev);
}
/*
* unregister
*/
void
snd_emux_detach_seq_oss(struct snd_emux *emu)
{
if (emu->oss_synth) {
snd_device_free(emu->card, emu->oss_synth);
emu->oss_synth = NULL;
}
}
/* use port number as a unique soundfont client number */
#define SF_CLIENT_NO(p) ((p) + 0x1000)
/*
* open port for OSS sequencer
*/
static int
snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
{
struct snd_emux *emu;
struct snd_emux_port *p;
struct snd_seq_port_callback callback;
char tmpname[64];
emu = closure;
if (snd_BUG_ON(!arg || !emu))
return -ENXIO;
mutex_lock(&emu->register_mutex);
if (!snd_emux_inc_count(emu)) {
mutex_unlock(&emu->register_mutex);
return -EFAULT;
}
memset(&callback, 0, sizeof(callback));
callback.owner = THIS_MODULE;
callback.event_input = snd_emux_event_oss_input;
sprintf(tmpname, "%s OSS Port", emu->name);
p = snd_emux_create_port(emu, tmpname, 32,
1, &callback);
if (p == NULL) {
snd_printk(KERN_ERR "can't create port\n");
snd_emux_dec_count(emu);
mutex_unlock(&emu->register_mutex);
return -ENOMEM;
}
/* fill the argument data */
arg->private_data = p;
arg->addr.client = p->chset.client;
arg->addr.port = p->chset.port;
p->oss_arg = arg;
reset_port_mode(p, arg->seq_mode);
snd_emux_reset_port(p);
mutex_unlock(&emu->register_mutex);
return 0;
}
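/* Default drum channels: MIDI channel 10 in each group of 16 (bits 9 and 25) */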
#define DEFAULT_DRUM_FLAGS ((1<<9) | (1<<25))
/*
* reset port mode
*/
static void
reset_port_mode(struct snd_emux_port *port, int midi_mode)
{
if (midi_mode) {
port->port_mode = SNDRV_EMUX_PORT_MODE_OSS_MIDI;
port->drum_flags = DEFAULT_DRUM_FLAGS;
port->volume_atten = 0;
port->oss_arg->event_passing = SNDRV_SEQ_OSS_PROCESS_KEYPRESS;
} else {
port->port_mode = SNDRV_EMUX_PORT_MODE_OSS_SYNTH;
port->drum_flags = 0;
port->volume_atten = 32;
port->oss_arg->event_passing = SNDRV_SEQ_OSS_PROCESS_EVENTS;
}
}
/*
* close port
*/
static int
snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
{
struct snd_emux *emu;
struct snd_emux_port *p;
if (snd_BUG_ON(!arg))
return -ENXIO;
p = arg->private_data;
if (snd_BUG_ON(!p))
return -ENXIO;
emu = p->emu;
if (snd_BUG_ON(!emu))
return -ENXIO;
mutex_lock(&emu->register_mutex);
snd_emux_sounds_off_all(p);
snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
snd_seq_event_port_detach(p->chset.client, p->chset.port);
snd_emux_dec_count(emu);
mutex_unlock(&emu->register_mutex);
return 0;
}
/*
* load patch
*/
static int
snd_emux_load_patch_seq_oss(struct snd_seq_oss_arg *arg, int format,
const char __user *buf, int offs, int count)
{
struct snd_emux *emu;
struct snd_emux_port *p;
int rc;
if (snd_BUG_ON(!arg))
return -ENXIO;
p = arg->private_data;
if (snd_BUG_ON(!p))
return -ENXIO;
emu = p->emu;
if (snd_BUG_ON(!emu))
return -ENXIO;
if (format == GUS_PATCH)
rc = snd_soundfont_load_guspatch(emu->sflist, buf, count,
SF_CLIENT_NO(p->chset.port));
else if (format == SNDRV_OSS_SOUNDFONT_PATCH) {
struct soundfont_patch_info patch;
if (count < (int)sizeof(patch))
return -EINVAL;
if (copy_from_user(&patch, buf, sizeof(patch)))
return -EFAULT;
if (patch.type >= SNDRV_SFNT_LOAD_INFO &&
patch.type <= SNDRV_SFNT_PROBE_DATA)
rc = snd_soundfont_load(emu->sflist, buf, count, SF_CLIENT_NO(p->chset.port));
else {
if (emu->ops.load_fx)
rc = emu->ops.load_fx(emu, patch.type, patch.optarg, buf, count);
else
rc = -EINVAL;
}
} else
rc = 0;
return rc;
}
/*
* ioctl
*/
static int
snd_emux_ioctl_seq_oss(struct snd_seq_oss_arg *arg, unsigned int cmd, unsigned long ioarg)
{
struct snd_emux_port *p;
struct snd_emux *emu;
if (snd_BUG_ON(!arg))
return -ENXIO;
p = arg->private_data;
if (snd_BUG_ON(!p))
return -ENXIO;
emu = p->emu;
if (snd_BUG_ON(!emu))
return -ENXIO;
switch (cmd) {
case SNDCTL_SEQ_RESETSAMPLES:
snd_soundfont_remove_samples(emu->sflist);
return 0;
case SNDCTL_SYNTH_MEMAVL:
if (emu->memhdr)
return snd_util_mem_avail(emu->memhdr);
return 0;
}
return 0;
}
/*
* reset device
*/
static int
snd_emux_reset_seq_oss(struct snd_seq_oss_arg *arg)
{
struct snd_emux_port *p;
if (snd_BUG_ON(!arg))
return -ENXIO;
p = arg->private_data;
if (snd_BUG_ON(!p))
return -ENXIO;
snd_emux_reset_port(p);
return 0;
}
/*
* receive raw events: only SEQ_PRIVATE is accepted.
*/
static int
snd_emux_event_oss_input(struct snd_seq_event *ev, int direct, void *private_data,
int atomic, int hop)
{
struct snd_emux *emu;
struct snd_emux_port *p;
unsigned char cmd, *data;
p = private_data;
if (snd_BUG_ON(!p))
return -EINVAL;
emu = p->emu;
if (snd_BUG_ON(!emu))
return -EINVAL;
if (ev->type != SNDRV_SEQ_EVENT_OSS)
return snd_emux_event_input(ev, direct, private_data, atomic, hop);
data = ev->data.raw8.d;
/* only SEQ_PRIVATE is accepted */
if (data[0] != 0xfe)
return 0;
cmd = data[2] & _EMUX_OSS_MODE_VALUE_MASK;
if (data[2] & _EMUX_OSS_MODE_FLAG)
emuspec_control(emu, p, cmd, data, atomic, hop);
else
gusspec_control(emu, p, cmd, data, atomic, hop);
return 0;
}
/*
* OSS/AWE driver specific h/w controls
*/
static void
emuspec_control(struct snd_emux *emu, struct snd_emux_port *port, int cmd,
unsigned char *event, int atomic, int hop)
{
int voice;
unsigned short p1;
short p2;
int i;
struct snd_midi_channel *chan;
voice = event[3];
if (voice < 0 || voice >= port->chset.max_channels)
chan = NULL;
else
chan = &port->chset.channels[voice];
p1 = *(unsigned short *) &event[4];
p2 = *(short *) &event[6];
switch (cmd) {
#if 0 /* don't do this atomically */
case _EMUX_OSS_REMOVE_LAST_SAMPLES:
snd_soundfont_remove_unlocked(emu->sflist);
break;
#endif
case _EMUX_OSS_SEND_EFFECT:
if (chan)
snd_emux_send_effect_oss(port, chan, p1, p2);
break;
case _EMUX_OSS_TERMINATE_ALL:
snd_emux_terminate_all(emu);
break;
case _EMUX_OSS_TERMINATE_CHANNEL:
/*snd_emux_mute_channel(emu, chan);*/
break;
case _EMUX_OSS_RESET_CHANNEL:
/*snd_emux_channel_init(chset, chan);*/
break;
case _EMUX_OSS_RELEASE_ALL:
fake_event(emu, port, voice, MIDI_CTL_ALL_NOTES_OFF, 0, atomic, hop);
break;
case _EMUX_OSS_NOTEOFF_ALL:
fake_event(emu, port, voice, MIDI_CTL_ALL_SOUNDS_OFF, 0, atomic, hop);
break;
case _EMUX_OSS_INITIAL_VOLUME:
if (p2) {
port->volume_atten = (short)p1;
snd_emux_update_port(port, SNDRV_EMUX_UPDATE_VOLUME);
}
break;
case _EMUX_OSS_CHN_PRESSURE:
if (chan) {
chan->midi_pressure = p1;
snd_emux_update_channel(port, chan, SNDRV_EMUX_UPDATE_FMMOD|SNDRV_EMUX_UPDATE_FM2FRQ2);
}
break;
case _EMUX_OSS_CHANNEL_MODE:
reset_port_mode(port, p1);
snd_emux_reset_port(port);
break;
case _EMUX_OSS_DRUM_CHANNELS:
port->drum_flags = *(unsigned int*)&event[4];
for (i = 0; i < port->chset.max_channels; i++) {
chan = &port->chset.channels[i];
chan->drum_channel = ((port->drum_flags >> i) & 1) ? 1 : 0;
}
break;
case _EMUX_OSS_MISC_MODE:
if (p1 < EMUX_MD_END)
port->ctrls[p1] = p2;
break;
case _EMUX_OSS_DEBUG_MODE:
break;
default:
if (emu->ops.oss_ioctl)
emu->ops.oss_ioctl(emu, cmd, p1, p2);
break;
}
}
/*
* GUS specific h/w controls
*/
#include <linux/ultrasound.h>
static void
gusspec_control(struct snd_emux *emu, struct snd_emux_port *port, int cmd,
unsigned char *event, int atomic, int hop)
{
int voice;
unsigned short p1;
short p2;
int plong;
struct snd_midi_channel *chan;
if (port->port_mode != SNDRV_EMUX_PORT_MODE_OSS_SYNTH)
return;
if (cmd == _GUS_NUMVOICES)
return;
voice = event[3];
if (voice < 0 || voice >= port->chset.max_channels)
return;
chan = &port->chset.channels[voice];
p1 = *(unsigned short *) &event[4];
p2 = *(short *) &event[6];
plong = *(int*) &event[4];
switch (cmd) {
case _GUS_VOICESAMPLE:
chan->midi_program = p1;
return;
case _GUS_VOICEBALA:
/* 0 to 15 --> 0 to 127 */
chan->control[MIDI_CTL_MSB_PAN] = (int)p1 << 3;
snd_emux_update_channel(port, chan, SNDRV_EMUX_UPDATE_PAN);
return;
case _GUS_VOICEVOL:
case _GUS_VOICEVOL2:
/* not supported yet */
return;
case _GUS_RAMPRANGE:
case _GUS_RAMPRATE:
case _GUS_RAMPMODE:
case _GUS_RAMPON:
case _GUS_RAMPOFF:
/* volume ramping not supported */
return;
case _GUS_VOLUME_SCALE:
return;
case _GUS_VOICE_POS:
#ifdef SNDRV_EMUX_USE_RAW_EFFECT
snd_emux_send_effect(port, chan, EMUX_FX_SAMPLE_START,
(short)(plong & 0x7fff),
EMUX_FX_FLAG_SET);
snd_emux_send_effect(port, chan, EMUX_FX_COARSE_SAMPLE_START,
(plong >> 15) & 0xffff,
EMUX_FX_FLAG_SET);
#endif
return;
}
}
/*
* send an event to midi emulation
*/
static void
fake_event(struct snd_emux *emu, struct snd_emux_port *port, int ch, int param, int val, int atomic, int hop)
{
struct snd_seq_event ev;
memset(&ev, 0, sizeof(ev));
ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
ev.data.control.channel = ch;
ev.data.control.param = param;
ev.data.control.value = val;
snd_emux_event_input(&ev, 0, port, atomic, hop);
}
#endif /* CONFIG_SND_SEQUENCER_OSS */
| gpl-2.0 |
uberlaggydarwin/306shcaf | drivers/input/ff-core.c | 4858 | 9133 | /*
* Force feedback support for Linux input subsystem
*
* Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com>
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
* Check that the effect_id is a valid effect and whether the user
* is the owner
*/
static int check_effect_access(struct ff_device *ff, int effect_id,
struct file *file)
{
if (effect_id < 0 || effect_id >= ff->max_effects ||
!ff->effect_owners[effect_id])
return -EINVAL;
if (file && ff->effect_owners[effect_id] != file)
return -EACCES;
return 0;
}
/*
* Checks whether 2 effects can be combined together
*/
static inline int check_effects_compatible(struct ff_effect *e1,
struct ff_effect *e2)
{
return e1->type == e2->type &&
(e1->type != FF_PERIODIC ||
e1->u.periodic.waveform == e2->u.periodic.waveform);
}
/*
* Convert an effect into compatible one
*/
static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
{
int magnitude;
switch (effect->type) {
case FF_RUMBLE:
if (!test_bit(FF_PERIODIC, ff->ffbit))
return -EINVAL;
/*
* calculate magnitude of sine wave as average of rumble's
* 2/3 of strong magnitude and 1/3 of weak magnitude
*/
magnitude = effect->u.rumble.strong_magnitude / 3 +
effect->u.rumble.weak_magnitude / 6;
effect->type = FF_PERIODIC;
effect->u.periodic.waveform = FF_SINE;
effect->u.periodic.period = 50;
effect->u.periodic.magnitude = min(magnitude, 0x7fff); /* clamp to s16 max */
effect->u.periodic.offset = 0;
effect->u.periodic.phase = 0;
effect->u.periodic.envelope.attack_length = 0;
effect->u.periodic.envelope.attack_level = 0;
effect->u.periodic.envelope.fade_length = 0;
effect->u.periodic.envelope.fade_level = 0;
return 0;
default:
/* Let driver handle conversion */
return 0;
}
}
/**
* input_ff_upload() - upload effect into force-feedback device
* @dev: input device
* @effect: effect to be uploaded
* @file: owner of the effect
*/
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
struct file *file)
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
int ret = 0;
int id;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
!test_bit(effect->type, dev->ffbit)) {
pr_debug("invalid or not supported effect type in upload\n");
return -EINVAL;
}
if (effect->type == FF_PERIODIC &&
(effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
!test_bit(effect->u.periodic.waveform, dev->ffbit))) {
pr_debug("invalid or not supported wave form in upload\n");
return -EINVAL;
}
if (!test_bit(effect->type, ff->ffbit)) {
ret = compat_effect(ff, effect);
if (ret)
return ret;
}
mutex_lock(&ff->mutex);
if (effect->id == -1) {
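/* new effect: find a free slot in the effect table */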
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
if (id >= ff->max_effects) {
ret = -ENOSPC;
goto out;
}
effect->id = id;
old = NULL;
} else {
id = effect->id;
ret = check_effect_access(ff, id, file);
if (ret)
goto out;
old = &ff->effects[id];
if (!check_effects_compatible(effect, old)) {
ret = -EINVAL;
goto out;
}
}
ret = ff->upload(dev, effect, old);
if (ret)
goto out;
spin_lock_irq(&dev->event_lock);
ff->effects[id] = *effect;
ff->effect_owners[id] = file;
spin_unlock_irq(&dev->event_lock);
out:
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
/*
* Erases the effect if the requester is also the effect owner. The mutex
* should already be locked before calling this function.
*/
static int erase_effect(struct input_dev *dev, int effect_id,
struct file *file)
{
struct ff_device *ff = dev->ff;
int error;
error = check_effect_access(ff, effect_id, file);
if (error)
return error;
spin_lock_irq(&dev->event_lock);
ff->playback(dev, effect_id, 0);
ff->effect_owners[effect_id] = NULL;
spin_unlock_irq(&dev->event_lock);
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
spin_lock_irq(&dev->event_lock);
ff->effect_owners[effect_id] = file;
spin_unlock_irq(&dev->event_lock);
return error;
}
}
return 0;
}
/**
* input_ff_erase - erase a force-feedback effect from device
* @dev: input device to erase effect from
* @effect_id: id of the effect to be erased
* @file: purported owner of the request
*
* This function erases a force-feedback effect from specified device.
* The effect will only be erased if it was uploaded through the same
* file handle that is requesting erase.
*/
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
mutex_lock(&ff->mutex);
ret = erase_effect(dev, effect_id, file);
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
* flush_effects - erase all effects owned by a file handle
*/
static int flush_effects(struct input_dev *dev, struct file *file)
{
struct ff_device *ff = dev->ff;
int i;
pr_debug("flushing now\n");
mutex_lock(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
mutex_unlock(&ff->mutex);
return 0;
}
/**
* input_ff_event() - generic handler for force-feedback events
* @dev: input device to send the effect to
* @type: event type (anything but EV_FF is ignored)
* @code: event code
* @value: event value
*/
int input_ff_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
struct ff_device *ff = dev->ff;
if (type != EV_FF)
return 0;
switch (code) {
case FF_GAIN:
if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffff)
break;
ff->set_gain(dev, value);
break;
case FF_AUTOCENTER:
if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffff)
break;
ff->set_autocenter(dev, value);
break;
default:
if (check_effect_access(ff, code, NULL) == 0)
ff->playback(dev, code, value);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_event);
/**
* input_ff_create() - create force-feedback device
* @dev: input device supporting force-feedback
* @max_effects: maximum number of effects supported by the device
*
* This function allocates all necessary memory for a force feedback
* portion of an input device and installs all default handlers.
* @dev->ffbit should be already set up before calling this function.
* Once ff device is created you need to setup its upload, erase,
* playback and other handlers before registering input device
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
struct ff_device *ff;
size_t ff_dev_size;
int i;
if (!max_effects) {
pr_err("cannot allocate device without any effects\n");
return -EINVAL;
}
ff_dev_size = sizeof(struct ff_device) +
max_effects * sizeof(struct file *);
if (ff_dev_size < max_effects) /* overflow */
return -EINVAL;
ff = kzalloc(ff_dev_size, GFP_KERNEL);
if (!ff)
return -ENOMEM;
ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
GFP_KERNEL);
if (!ff->effects) {
kfree(ff);
return -ENOMEM;
}
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
dev->ff = ff;
dev->flush = flush_effects;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
/* Copy "true" bits into ff device bitmap */
for (i = 0; i <= FF_MAX; i++)
if (test_bit(i, dev->ffbit))
__set_bit(i, ff->ffbit);
/* we can emulate RUMBLE with periodic effects */
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
/**
* input_ff_destroy() - frees force feedback portion of input device
* @dev: input device supporting force feedback
*
* This function is only needed in error path as input core will
* automatically free force feedback structures when device is
* destroyed.
*/
void input_ff_destroy(struct input_dev *dev)
{
struct ff_device *ff = dev->ff;
__clear_bit(EV_FF, dev->evbit);
if (ff) {
if (ff->destroy)
ff->destroy(ff);
kfree(ff->private);
kfree(ff->effects);
kfree(ff);
dev->ff = NULL;
}
}
EXPORT_SYMBOL_GPL(input_ff_destroy);
| gpl-2.0 |
omnirom/android_kernel_samsung_exynos5410 | drivers/scsi/libsas/sas_scsi_host.c | 4858 | 27697 | /*
* Serial Attached SCSI (SAS) class SCSI Host glue.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
int hs = 0, stat = 0;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
hs = DID_NO_CONNECT;
} else { /* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
case SAS_DEV_NO_RESPONSE:
case SAS_INTERRUPTED:
case SAS_PHY_DOWN:
case SAS_NAK_R_ERR:
case SAS_OPEN_TO:
hs = DID_NO_CONNECT;
break;
case SAS_DATA_UNDERRUN:
scsi_set_resid(sc, ts->residual);
if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
hs = DID_ERROR;
break;
case SAS_DATA_OVERRUN:
hs = DID_ERROR;
break;
case SAS_QUEUE_FULL:
hs = DID_SOFT_ERROR; /* retry */
break;
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
case SAS_SG_ERR:
hs = DID_PARITY;
break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
else
hs = DID_ERROR;
break;
case SAS_PROTO_RESPONSE:
SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
"task; please report this\n",
task->dev->port->ha->sas_ha_name);
break;
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
case SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
stat = SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
break;
}
}
sc->result = (hs << 16) | stat;
ASSIGN_SAS_TASK(sc, NULL);
list_del_init(&task->list);
sas_free_task(task);
}
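/*
 * Per-task completion callback installed by sas_create_task(): completes the
 * scsi_cmnd unless the error handler has frozen the HA and owns it already.
 */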
static void sas_scsi_task_done(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct domain_device *dev = task->dev;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&dev->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state))
task = NULL;
else
ASSIGN_SAS_TASK(sc, NULL);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (unlikely(!task)) {
/* task will be completed by the error handler */
SAS_DPRINTK("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
list_del_init(&task->list);
sas_free_task(task);
return;
}
sas_end_task(sc, task);
sc->scsi_done(sc);
}
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
struct domain_device *dev,
gfp_t gfp_flags)
{
struct sas_task *task = sas_alloc_task(gfp_flags);
struct scsi_lun lun;
if (!task)
return NULL;
task->uldd_task = cmd;
ASSIGN_SAS_TASK(cmd, task);
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
task->ssp_task.retry_count = 1;
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
task->scatter = scsi_sglist(cmd);
task->num_scatter = scsi_sg_count(cmd);
task->total_xfer_len = scsi_bufflen(cmd);
task->data_dir = cmd->sc_data_direction;
task->task_done = sas_scsi_task_done;
return task;
}
int sas_queue_up(struct sas_task *task)
{
struct sas_ha_struct *sas_ha = task->dev->port->ha;
struct scsi_core *core = &sas_ha->core;
unsigned long flags;
LIST_HEAD(list);
spin_lock_irqsave(&core->task_queue_lock, flags);
if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
spin_unlock_irqrestore(&core->task_queue_lock, flags);
return -SAS_QUEUE_FULL;
}
list_add_tail(&task->list, &core->task_queue);
core->task_queue_size += 1;
spin_unlock_irqrestore(&core->task_queue_lock, flags);
wake_up_process(core->queue_thread);
return 0;
}
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct sas_internal *i = to_sas_internal(host->transportt);
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
int res = 0;
/* If the device fell off, no sense in issuing commands */
if (test_bit(SAS_DEV_GONE, &dev->state)) {
cmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
if (dev_is_sata(dev)) {
spin_lock_irq(dev->sata_dev.ap->lock);
res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
spin_unlock_irq(dev->sata_dev.ap->lock);
return res;
}
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
return SCSI_MLQUEUE_HOST_BUSY;
/* Queue up, Direct Mode or Task Collector Mode. */
if (sas_ha->lldd_max_execute_num < 2)
res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
else
res = sas_queue_up(task);
if (res)
goto out_free_task;
return 0;
out_free_task:
SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
if (res == -SAS_QUEUE_FULL)
cmd->result = DID_SOFT_ERROR << 16; /* retry */
else
cmd->result = DID_ERROR << 16;
out_done:
cmd->scsi_done(cmd);
return 0;
}
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
* of the task, so we should be guaranteed not to be racing with
* any completions from the LLD. Task is freed after this.
*/
sas_end_task(cmd, task);
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
*/
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_ha_struct *ha = dev->port->ha;
struct sas_task *task = TO_SAS_TASK(cmd);
if (!dev_is_sata(dev)) {
sas_eh_finish_cmd(cmd);
return;
}
/* report the timeout to libata */
sas_end_task(cmd, task);
list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
}
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
sas_eh_defer_cmd(cmd);
}
}
static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
struct domain_device *dev)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *x = cmd_to_domain_dev(cmd);
if (x == dev)
sas_eh_finish_cmd(cmd);
}
}
static void sas_scsi_clear_queue_port(struct list_head *error_q,
struct asd_sas_port *port)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct asd_sas_port *x = dev->port;
if (x == port)
sas_eh_finish_cmd(cmd);
}
}
enum task_disposition {
TASK_IS_DONE,
TASK_IS_ABORTED,
TASK_IS_AT_LU,
TASK_IS_NOT_AT_HA,
TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
};
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
struct sas_ha_struct *ha = task->dev->port->ha;
unsigned long flags;
int i, res;
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
if (ha->lldd_max_execute_num > 1) {
struct scsi_core *core = &ha->core;
struct sas_task *t, *n;
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
list_for_each_entry_safe(t, n, &core->task_queue, list)
if (task == t) {
list_del_init(&t->list);
break;
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
if (task == t)
return TASK_IS_NOT_AT_HA;
}
for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
return TASK_IS_DONE;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
SAS_DPRINTK("%s: querying task 0x%p\n",
__func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n",
__func__, task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n",
__func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
}
}
}
return res;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
if (i->dft->lldd_abort_task_set)
res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_clear_task_set)
res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
}
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_lu_reset)
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
}
return res;
}
static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
return res;
}
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
struct sas_ha_struct *ha = dev->port->ha;
struct sas_phy *phy;
unsigned long flags;
/* a published domain device always has a valid phy, it may be
* stale, but it is never NULL
*/
BUG_ON(!dev->phy);
spin_lock_irqsave(&ha->phy_port_lock, flags);
phy = dev->phy;
get_device(&phy->dev);
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
struct scsi_lun lun;
int res;
int_to_scsilun(cmd->device->lun, &lun);
if (!i->dft->lldd_lu_reset)
return FAILED;
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Attempt to send a phy (bus) reset */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_phy *phy = sas_get_local_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
if (res)
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
kobject_name(&phy->dev.kobj),
res);
sas_put_local_phy(phy);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
int res;
struct Scsi_Host *shost = cmd->device->host;
if (!shost->hostt->eh_device_reset_handler)
goto try_bus_reset;
res = shost->hostt->eh_device_reset_handler(cmd);
if (res == SUCCESS)
return res;
try_bus_reset:
if (shost->hostt->eh_bus_reset_handler)
return shost->hostt->eh_bus_reset_handler(cmd);
return FAILED;
}
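/*
 * Walk the eh work queue: try to find/abort each failed task, then escalate
 * from LU reset to I_T nexus reset to clearing the whole port or HA.
 */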
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE;
int tmf_resp, need_reset;
struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
LIST_HEAD(done);
/* clean out any commands that won the completion vs eh race */
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task;
spin_lock_irqsave(&dev->done_lock, flags);
/* by this point the lldd has either observed
* SAS_HA_FROZEN and is leaving the task alone, or has
* won the race with eh and decided to complete it
*/
task = TO_SAS_TASK(cmd);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (!task)
list_move_tail(&cmd->eh_entry, &done);
}
Again:
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
spin_lock_irqsave(&task->task_state_lock, flags);
need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
__func__, task);
goto reset;
}
SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
cmd->eh_eflags = 0;
switch (res) {
case TASK_IS_NOT_AT_HA:
SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
__func__, task,
cmd->retries ? "retry" : "aborted");
if (cmd->retries)
cmd->retries--;
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("dev %016llx LU %x is "
"recovered\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
sas_eh_defer_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
/* fallthrough */
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_I_T(work_q, dev);
goto Again;
}
/* Hammer time :-) */
try_to_reset_cmd_device(cmd);
if (i->dft->lldd_clear_nexus_port) {
struct asd_sas_port *port = task->dev->port;
SAS_DPRINTK("clearing nexus for port:%d\n",
port->id);
res = i->dft->lldd_clear_nexus_port(port);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("clear nexus port:%d "
"succeeded\n", port->id);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_port(work_q,
port);
goto Again;
}
}
if (i->dft->lldd_clear_nexus_ha) {
SAS_DPRINTK("clear nexus ha\n");
res = i->dft->lldd_clear_nexus_ha(ha);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("clear nexus ha "
"succeeded\n");
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
/* If we are here -- this means that no amount
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
SAS_DPRINTK("error from device %llx, LUN %x "
"couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
out:
list_splice_tail(&done, work_q);
list_splice_tail_init(&ha->eh_ata_q, work_q);
return;
clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
goto out;
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
unsigned long flags;
LIST_HEAD(eh_work_q);
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
* SAS_HA_FROZEN gives eh dominion over all sas_task completion.
*/
set_bit(SAS_HA_FROZEN, &ha->state);
sas_eh_handle_sas_errors(shost, &eh_work_q);
clear_bit(SAS_HA_FROZEN, &ha->state);
if (list_empty(&eh_work_q))
goto out;
/*
* Now deal with SCSI commands that completed ok but have an error
* code (and hopefully sense data) attached. This is roughly what
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
if (ha->lldd_max_execute_num > 1)
wake_up_process(ha->core.queue_thread);
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
return -EINVAL;
}
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct domain_device *found_dev = NULL;
int i;
unsigned long flags;
spin_lock_irqsave(&ha->phy_port_lock, flags);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev;
spin_lock(&port->dev_list_lock);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (rphy == dev->rphy) {
found_dev = dev;
spin_unlock(&port->dev_list_lock);
goto found;
}
}
spin_unlock(&port->dev_list_lock);
}
found:
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return found_dev;
}
int sas_target_alloc(struct scsi_target *starget)
{
struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
if (!found_dev)
return -ENODEV;
kref_get(&found_dev->kref);
starget->hostdata = found_dev;
return 0;
}
#define SAS_DEF_QD 256
int sas_slave_configure(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
struct sas_ha_struct *sas_ha;
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
return 0;
}
sas_ha = dev->port->ha;
sas_read_port_mode_page(scsi_dev);
if (scsi_dev->tagged_supported) {
scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
} else {
SAS_DPRINTK("device %llx, LUN %x doesn't support "
"TCQ\n", SAS_ADDR(dev->sas_addr),
scsi_dev->lun);
scsi_dev->tagged_supported = 0;
scsi_set_tag_type(scsi_dev, 0);
scsi_deactivate_tcq(scsi_dev, 1);
}
scsi_dev->allow_restart = 1;
return 0;
}
int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,
reason);
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
case SCSI_QDEPTH_RAMP_UP:
if (!sdev->tagged_supported)
depth = 1;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
default:
return -EOPNOTSUPP;
}
return depth;
}
int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
return -EINVAL;
if (!scsi_dev->tagged_supported)
return 0;
scsi_deactivate_tcq(scsi_dev, 1);
scsi_set_tag_type(scsi_dev, qt);
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
return qt;
}
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
sector_t capacity, int *hsc)
{
hsc[0] = 255;
hsc[1] = 63;
sector_div(capacity, 255*63);
hsc[2] = capacity;
return 0;
}
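/*
 * Illustrative note (not part of the original source): the fixed geometry
 * above reports 255 heads and 63 sectors per track, so the cylinder count
 * is simply capacity / (255 * 63).  A disk of 143374744 sectors, for
 * example, would be reported as 143374744 / 16065 = 8924 cylinders.
 */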
/* ---------- Task Collector Thread implementation ---------- */
static void sas_queue(struct sas_ha_struct *sas_ha)
{
struct scsi_core *core = &sas_ha->core;
unsigned long flags;
LIST_HEAD(q);
int can_queue;
int res;
struct sas_internal *i = to_sas_internal(core->shost->transportt);
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
while (!kthread_should_stop() &&
!list_empty(&core->task_queue) &&
!test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
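/*
 * Explanatory note: if the LLDD still has room for the whole backlog
 * (lldd_queue_size - task_queue_size >= 0), the entire task_queue is
 * spliced onto the local list; otherwise only lldd_queue_size entries
 * are moved and the rest stay queued for the next pass.
 */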
can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
if (can_queue >= 0) {
can_queue = core->task_queue_size;
list_splice_init(&core->task_queue, &q);
} else {
struct list_head *a, *n;
can_queue = sas_ha->lldd_queue_size;
list_for_each_safe(a, n, &core->task_queue) {
list_move_tail(a, &q);
if (--can_queue == 0)
break;
}
can_queue = sas_ha->lldd_queue_size;
}
core->task_queue_size -= can_queue;
spin_unlock_irqrestore(&core->task_queue_lock, flags);
{
struct sas_task *task = list_entry(q.next,
struct sas_task,
list);
list_del_init(&q);
res = i->dft->lldd_execute_task(task, can_queue,
GFP_KERNEL);
if (unlikely(res))
__list_add(&q, task->list.prev, &task->list);
}
spin_lock_irqsave(&core->task_queue_lock, flags);
if (res) {
list_splice_init(&q, &core->task_queue); /*at head*/
core->task_queue_size += can_queue;
}
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
}
/**
* sas_queue_thread -- The Task Collector thread
* @_sas_ha: pointer to struct sas_ha
*/
static int sas_queue_thread(void *_sas_ha)
{
struct sas_ha_struct *sas_ha = _sas_ha;
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
sas_queue(sas_ha);
if (kthread_should_stop())
break;
}
return 0;
}
int sas_init_queue(struct sas_ha_struct *sas_ha)
{
struct scsi_core *core = &sas_ha->core;
spin_lock_init(&core->task_queue_lock);
mutex_init(&core->task_queue_flush);
core->task_queue_size = 0;
INIT_LIST_HEAD(&core->task_queue);
core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
"sas_queue_%d", core->shost->host_no);
if (IS_ERR(core->queue_thread))
return PTR_ERR(core->queue_thread);
return 0;
}
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
{
unsigned long flags;
struct scsi_core *core = &sas_ha->core;
struct sas_task *task, *n;
kthread_stop(core->queue_thread);
if (!list_empty(&core->task_queue))
SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
SAS_ADDR(sas_ha->sas_addr));
spin_lock_irqsave(&core->task_queue_lock, flags);
list_for_each_entry_safe(task, n, &core->task_queue, list) {
struct scsi_cmnd *cmd = task->uldd_task;
list_del_init(&task->list);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd);
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
*/
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
/* Escape for libsas internal commands */
if (!sc) {
if (!del_timer(&task->timer))
return;
task->timer.function(task->timer.data);
return;
}
if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
} else {
struct request_queue *q = sc->device->request_queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(sc->request);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_schedule_eh(sc->device->host);
}
}
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = starget->hostdata;
if (!found_dev)
return;
starget->hostdata = NULL;
sas_put_device(found_dev);
}
static void sas_parse_addr(u8 *sas_addr, const char *p)
{
int i;
for (i = 0; i < SAS_ADDR_SIZE; i++) {
u8 h, l;
if (!*p)
break;
h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
sas_addr[i] = (h<<4) | l;
}
}
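/*
 * Illustrative note: sas_parse_addr() turns an ASCII hex string into the
 * binary SAS address, two characters per byte, high nibble first.  A
 * hypothetical firmware blob containing "5005076A00072252" would yield
 * the bytes 50 05 07 6a 00 07 22 52.
 */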
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
int res;
const struct firmware *fw;
res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
if (res)
return res;
if (fw->size < SAS_STRING_ADDR_SIZE) {
res = -ENODEV;
goto out;
}
sas_parse_addr(addr, fw->data);
out:
release_firmware(fw);
return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);
| gpl-2.0 |
LDAP/android_kernel_motorola_msm8226 | sound/pci/emu10k1/irq.c | 12794 | 6691 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Creative Labs, Inc.
* Routines for IRQ control of EMU10K1 chips
*
* BUGS:
* --
*
* TODO:
* --
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id)
{
struct snd_emu10k1 *emu = dev_id;
unsigned int status, status2, orig_status, orig_status2;
int handled = 0;
int timeout = 0;
while (((status = inl(emu->port + IPR)) != 0) && (timeout < 1000)) {
timeout++;
orig_status = status;
handled = 1;
if ((status & 0xffffffff) == 0xffffffff) {
snd_printk(KERN_INFO "snd-emu10k1: Suspected sound card removal\n");
break;
}
if (status & IPR_PCIERROR) {
snd_printk(KERN_ERR "interrupt: PCI error\n");
snd_emu10k1_intr_disable(emu, INTE_PCIERRORENABLE);
status &= ~IPR_PCIERROR;
}
if (status & (IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE)) {
if (emu->hwvol_interrupt)
emu->hwvol_interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_VOLINCRENABLE|INTE_VOLDECRENABLE|INTE_MUTEENABLE);
status &= ~(IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE);
}
if (status & IPR_CHANNELLOOP) {
int voice;
int voice_max = status & IPR_CHANNELNUMBERMASK;
u32 val;
struct snd_emu10k1_voice *pvoice = emu->voices;
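/*
 * Explanatory note: CLIPL holds the loop-interrupt-pending bits for
 * voices 0-31; once the loop index reaches 0x20 the pending bits for
 * voices 32-63 are fetched from CLIPH instead.  The same split applies
 * to the half-loop registers HLIPL/HLIPH below.
 */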
val = snd_emu10k1_ptr_read(emu, CLIPL, 0);
for (voice = 0; voice <= voice_max; voice++) {
if (voice == 0x20)
val = snd_emu10k1_ptr_read(emu, CLIPH, 0);
if (val & 1) {
if (pvoice->use && pvoice->interrupt != NULL) {
pvoice->interrupt(emu, pvoice);
snd_emu10k1_voice_intr_ack(emu, voice);
} else {
snd_emu10k1_voice_intr_disable(emu, voice);
}
}
val >>= 1;
pvoice++;
}
val = snd_emu10k1_ptr_read(emu, HLIPL, 0);
for (voice = 0; voice <= voice_max; voice++) {
if (voice == 0x20)
val = snd_emu10k1_ptr_read(emu, HLIPH, 0);
if (val & 1) {
if (pvoice->use && pvoice->interrupt != NULL) {
pvoice->interrupt(emu, pvoice);
snd_emu10k1_voice_half_loop_intr_ack(emu, voice);
} else {
snd_emu10k1_voice_half_loop_intr_disable(emu, voice);
}
}
val >>= 1;
pvoice++;
}
status &= ~IPR_CHANNELLOOP;
}
status &= ~IPR_CHANNELNUMBERMASK;
if (status & (IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL)) {
if (emu->capture_interrupt)
emu->capture_interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_ADCBUFENABLE);
status &= ~(IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL);
}
if (status & (IPR_MICBUFFULL|IPR_MICBUFHALFFULL)) {
if (emu->capture_mic_interrupt)
emu->capture_mic_interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_MICBUFENABLE);
status &= ~(IPR_MICBUFFULL|IPR_MICBUFHALFFULL);
}
if (status & (IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL)) {
if (emu->capture_efx_interrupt)
emu->capture_efx_interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_EFXBUFENABLE);
status &= ~(IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL);
}
if (status & (IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY)) {
if (emu->midi.interrupt)
emu->midi.interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_MIDITXENABLE|INTE_MIDIRXENABLE);
status &= ~(IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY);
}
if (status & (IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2)) {
if (emu->midi2.interrupt)
emu->midi2.interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_A_MIDITXENABLE2|INTE_A_MIDIRXENABLE2);
status &= ~(IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2);
}
if (status & IPR_INTERVALTIMER) {
if (emu->timer)
snd_timer_interrupt(emu->timer, emu->timer->sticks);
else
snd_emu10k1_intr_disable(emu, INTE_INTERVALTIMERENB);
status &= ~IPR_INTERVALTIMER;
}
if (status & (IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE)) {
if (emu->spdif_interrupt)
emu->spdif_interrupt(emu, status);
else
snd_emu10k1_intr_disable(emu, INTE_GPSPDIFENABLE|INTE_CDSPDIFENABLE);
status &= ~(IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE);
}
if (status & IPR_FXDSP) {
if (emu->dsp_interrupt)
emu->dsp_interrupt(emu);
else
snd_emu10k1_intr_disable(emu, INTE_FXDSPENABLE);
status &= ~IPR_FXDSP;
}
if (status & IPR_P16V) {
while ((status2 = inl(emu->port + IPR2)) != 0) {
u32 mask = INTE2_PLAYBACK_CH_0_LOOP; /* Full Loop */
struct snd_emu10k1_voice *pvoice = &(emu->p16v_voices[0]);
struct snd_emu10k1_voice *cvoice = &(emu->p16v_capture_voice);
//printk(KERN_INFO "status2=0x%x\n", status2);
orig_status2 = status2;
if(status2 & mask) {
if(pvoice->use) {
snd_pcm_period_elapsed(pvoice->epcm->substream);
} else {
snd_printk(KERN_ERR "p16v: status: 0x%08x, mask=0x%08x, pvoice=%p, use=%d\n", status2, mask, pvoice, pvoice->use);
}
}
if(status2 & 0x110000) {
//printk(KERN_INFO "capture int found\n");
if(cvoice->use) {
//printk(KERN_INFO "capture period_elapsed\n");
snd_pcm_period_elapsed(cvoice->epcm->substream);
}
}
outl(orig_status2, emu->port + IPR2); /* ack all */
}
status &= ~IPR_P16V;
}
if (status) {
unsigned int bits;
snd_printk(KERN_ERR "emu10k1: unhandled interrupt: 0x%08x\n", status);
//make sure any interrupts we don't handle are disabled:
bits = INTE_FXDSPENABLE |
INTE_PCIERRORENABLE |
INTE_VOLINCRENABLE |
INTE_VOLDECRENABLE |
INTE_MUTEENABLE |
INTE_MICBUFENABLE |
INTE_ADCBUFENABLE |
INTE_EFXBUFENABLE |
INTE_GPSPDIFENABLE |
INTE_CDSPDIFENABLE |
INTE_INTERVALTIMERENB |
INTE_MIDITXENABLE |
INTE_MIDIRXENABLE;
if (emu->audigy)
bits |= INTE_A_MIDITXENABLE2 | INTE_A_MIDIRXENABLE2;
snd_emu10k1_intr_disable(emu, bits);
}
outl(orig_status, emu->port + IPR); /* ack all */
}
if (timeout == 1000)
snd_printk(KERN_INFO "emu10k1 irq routine failure\n");
return IRQ_RETVAL(handled);
}
| gpl-2.0 |
wlasser/PandaCore2 | dep/StormLib/src/libtommath/bn_mp_prime_is_prime.c | 251 | 1934 | #include "tommath.h"
#ifdef BN_MP_PRIME_IS_PRIME_C
/* LibTomMath, multiple-precision integer library -- Tom St Denis
*
* LibTomMath is a library that provides multiple-precision
* integer arithmetic as well as number theoretic functionality.
*
* The library was designed directly after the MPI library by
* Michael Fromberger but has been written from scratch with
* additional optimizations in place.
*
* The library is free for all purposes without any express
* guarantee it works.
*
* Tom St Denis, tomstdenis@gmail.com, http://libtom.org
*/
/* performs a variable number of rounds of Miller-Rabin
*
* Probability of error after t rounds is no more than (1/4)**t
*
* Sets result to 1 if probably prime, 0 otherwise
*/
int mp_prime_is_prime (mp_int * a, int t, int *result)
{
mp_int b;
int ix, err, res;
/* default to no */
*result = MP_NO;
/* valid value of t? */
if (t <= 0 || t > PRIME_SIZE) {
return MP_VAL;
}
/* is the input equal to one of the primes in the table? */
for (ix = 0; ix < PRIME_SIZE; ix++) {
if (mp_cmp_d(a, ltm_prime_tab[ix]) == MP_EQ) {
*result = 1;
return MP_OKAY;
}
}
/* first perform trial division */
if ((err = mp_prime_is_divisible (a, &res)) != MP_OKAY) {
return err;
}
/* return if it was trivially divisible */
if (res == MP_YES) {
return MP_OKAY;
}
/* now perform the miller-rabin rounds */
if ((err = mp_init (&b)) != MP_OKAY) {
return err;
}
for (ix = 0; ix < t; ix++) {
/* set the prime */
mp_set (&b, ltm_prime_tab[ix]);
if ((err = mp_prime_miller_rabin (a, &b, &res)) != MP_OKAY) {
goto LBL_B;
}
if (res == MP_NO) {
goto LBL_B;
}
}
/* passed the test */
*result = MP_YES;
LBL_B:mp_clear (&b);
return err;
}
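/* Usage sketch (illustrative, not part of the library): with t = 8
 * Miller-Rabin rounds the residual error probability is at most
 * (1/4)^8 = 2^-16.  For an initialized mp_int n:
 *
 *   int res;
 *   if (mp_prime_is_prime(&n, 8, &res) == MP_OKAY && res == MP_YES) {
 *       // treat n as (probably) prime
 *   }
 */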
#endif
/* $Source: /cvs/libtom/libtommath/bn_mp_prime_is_prime.c,v $ */
/* $Revision: 1.4 $ */
/* $Date: 2006/12/28 01:25:13 $ */
| gpl-2.0 |
kasperhettinga/p4wifi_stock | arch/arm/common/sa1111.c | 507 | 35138 | /*
* linux/arch/arm/common/sa1111.c
*
* SA1111 support
*
* Original code by John Dorsey
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains all generic SA1111 support.
*
* All initialization functions provided here are intended to be called
* from machine specific code with proper arguments when required.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/sizes.h>
#include <asm/hardware/sa1111.h>
/* SA1111 IRQs */
#define IRQ_GPAIN0 (0)
#define IRQ_GPAIN1 (1)
#define IRQ_GPAIN2 (2)
#define IRQ_GPAIN3 (3)
#define IRQ_GPBIN0 (4)
#define IRQ_GPBIN1 (5)
#define IRQ_GPBIN2 (6)
#define IRQ_GPBIN3 (7)
#define IRQ_GPBIN4 (8)
#define IRQ_GPBIN5 (9)
#define IRQ_GPCIN0 (10)
#define IRQ_GPCIN1 (11)
#define IRQ_GPCIN2 (12)
#define IRQ_GPCIN3 (13)
#define IRQ_GPCIN4 (14)
#define IRQ_GPCIN5 (15)
#define IRQ_GPCIN6 (16)
#define IRQ_GPCIN7 (17)
#define IRQ_MSTXINT (18)
#define IRQ_MSRXINT (19)
#define IRQ_MSSTOPERRINT (20)
#define IRQ_TPTXINT (21)
#define IRQ_TPRXINT (22)
#define IRQ_TPSTOPERRINT (23)
#define SSPXMTINT (24)
#define SSPRCVINT (25)
#define SSPROR (26)
#define AUDXMTDMADONEA (32)
#define AUDRCVDMADONEA (33)
#define AUDXMTDMADONEB (34)
#define AUDRCVDMADONEB (35)
#define AUDTFSR (36)
#define AUDRFSR (37)
#define AUDTUR (38)
#define AUDROR (39)
#define AUDDTS (40)
#define AUDRDD (41)
#define AUDSTO (42)
#define IRQ_USBPWR (43)
#define IRQ_HCIM (44)
#define IRQ_HCIBUFFACC (45)
#define IRQ_HCIRMTWKP (46)
#define IRQ_NHCIMFCIR (47)
#define IRQ_USB_PORT_RESUME (48)
#define IRQ_S0_READY_NINT (49)
#define IRQ_S1_READY_NINT (50)
#define IRQ_S0_CD_VALID (51)
#define IRQ_S1_CD_VALID (52)
#define IRQ_S0_BVD1_STSCHG (53)
#define IRQ_S1_BVD1_STSCHG (54)
extern void __init sa1110_mb_enable(void);
/*
* We keep the following data for the overall SA1111. Note that the
* struct device and struct resource are "fake"; they should be supplied
* by the bus above us. However, in the interests of getting all SA1111
* drivers converted over to the device model, we provide this as an
* anchor point for all the other drivers.
*/
struct sa1111 {
struct device *dev;
struct clk *clk;
unsigned long phys;
int irq;
int irq_base; /* base for cascaded on-chip IRQs */
spinlock_t lock;
void __iomem *base;
#ifdef CONFIG_PM
void *saved_state;
#endif
};
/*
* We _really_ need to eliminate this. Its only users
* are the PWM and DMA checking code.
*/
static struct sa1111 *g_sa1111;
struct sa1111_dev_info {
unsigned long offset;
unsigned long skpcr_mask;
unsigned int devid;
unsigned int irq[6];
};
static struct sa1111_dev_info sa1111_devices[] = {
{
.offset = SA1111_USB,
.skpcr_mask = SKPCR_UCLKEN,
.devid = SA1111_DEVID_USB,
.irq = {
IRQ_USBPWR,
IRQ_HCIM,
IRQ_HCIBUFFACC,
IRQ_HCIRMTWKP,
IRQ_NHCIMFCIR,
IRQ_USB_PORT_RESUME
},
},
{
.offset = 0x0600,
.skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN,
.devid = SA1111_DEVID_SAC,
.irq = {
AUDXMTDMADONEA,
AUDXMTDMADONEB,
AUDRCVDMADONEA,
AUDRCVDMADONEB
},
},
{
.offset = 0x0800,
.skpcr_mask = SKPCR_SCLKEN,
.devid = SA1111_DEVID_SSP,
},
{
.offset = SA1111_KBD,
.skpcr_mask = SKPCR_PTCLKEN,
.devid = SA1111_DEVID_PS2,
.irq = {
IRQ_TPRXINT,
IRQ_TPTXINT
},
},
{
.offset = SA1111_MSE,
.skpcr_mask = SKPCR_PMCLKEN,
.devid = SA1111_DEVID_PS2,
.irq = {
IRQ_MSRXINT,
IRQ_MSTXINT
},
},
{
.offset = 0x1800,
.skpcr_mask = 0,
.devid = SA1111_DEVID_PCMCIA,
.irq = {
IRQ_S0_READY_NINT,
IRQ_S0_CD_VALID,
IRQ_S0_BVD1_STSCHG,
IRQ_S1_READY_NINT,
IRQ_S1_CD_VALID,
IRQ_S1_BVD1_STSCHG,
},
},
};
/*
* SA1111 interrupt support. Since clearing an IRQ while there are
* active IRQs causes the interrupt output to pulse, the upper levels
* will call us again if there are more interrupts to process.
*/
static void
sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned int stat0, stat1, i;
struct sa1111 *sachip = irq_get_handler_data(irq);
void __iomem *mapbase = sachip->base + SA1111_INTC;
stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1);
sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
desc->irq_data.chip->irq_ack(&desc->irq_data);
sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
if (stat0 == 0 && stat1 == 0) {
do_bad_IRQ(irq, desc);
return;
}
for (i = 0; stat0; i++, stat0 >>= 1)
if (stat0 & 1)
generic_handle_irq(i + sachip->irq_base);
for (i = 32; stat1; i++, stat1 >>= 1)
if (stat1 & 1)
generic_handle_irq(i + sachip->irq_base);
/* For level-based interrupts */
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
static void sa1111_ack_irq(struct irq_data *d)
{
}
static void sa1111_mask_lowirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie0;
ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
ie0 &= ~SA1111_IRQMASK_LO(d->irq);
writel(ie0, mapbase + SA1111_INTEN0);
}
static void sa1111_unmask_lowirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie0;
ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
ie0 |= SA1111_IRQMASK_LO(d->irq);
sa1111_writel(ie0, mapbase + SA1111_INTEN0);
}
/*
* Attempt to re-trigger the interrupt. The SA1111 contains a register
* (INTSET) which claims to do this. However, in practice no amount of
* manipulation of INTEN and INTSET guarantees that the interrupt will
* be triggered. In fact, it's very difficult, if not impossible to get
* INTSET to re-trigger the interrupt.
*/
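/*
 * Explanatory note: the workaround below briefly inverts the INTPOL bit
 * for this source and then restores it, manufacturing an edge that
 * usually latches the interrupt again; up to eight attempts are made
 * before the failure is reported.
 */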
static int sa1111_retrigger_lowirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long ip0;
int i;
ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
for (i = 0; i < 8; i++) {
sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
break;
}
if (i == 8)
printk(KERN_ERR "Danger Will Robinson: failed to "
"re-trigger IRQ%d\n", d->irq);
return i == 8 ? -1 : 0;
}
static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long ip0;
if (flags == IRQ_TYPE_PROBE)
return 0;
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
if (flags & IRQ_TYPE_EDGE_RISING)
ip0 &= ~mask;
else
ip0 |= mask;
sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0);
return 0;
}
static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long we0;
we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
if (on)
we0 |= mask;
else
we0 &= ~mask;
sa1111_writel(we0, mapbase + SA1111_WAKEEN0);
return 0;
}
static struct irq_chip sa1111_low_chip = {
.name = "SA1111-l",
.irq_ack = sa1111_ack_irq,
.irq_mask = sa1111_mask_lowirq,
.irq_unmask = sa1111_unmask_lowirq,
.irq_retrigger = sa1111_retrigger_lowirq,
.irq_set_type = sa1111_type_lowirq,
.irq_set_wake = sa1111_wake_lowirq,
};
static void sa1111_mask_highirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie1;
ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
ie1 &= ~SA1111_IRQMASK_HI(d->irq);
sa1111_writel(ie1, mapbase + SA1111_INTEN1);
}
static void sa1111_unmask_highirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie1;
ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
ie1 |= SA1111_IRQMASK_HI(d->irq);
sa1111_writel(ie1, mapbase + SA1111_INTEN1);
}
/*
* Attempt to re-trigger the interrupt. The SA1111 contains a register
* (INTSET) which claims to do this. However, in practice no amount of
* manipulation of INTEN and INTSET guarantees that the interrupt will
* be triggered. In fact, it's very difficult, if not impossible to get
* INTSET to re-trigger the interrupt.
*/
static int sa1111_retrigger_highirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_HI(d->irq);
unsigned long ip1;
int i;
ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
for (i = 0; i < 8; i++) {
sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1);
sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
break;
}
if (i == 8)
printk(KERN_ERR "Danger Will Robinson: failed to "
"re-trigger IRQ%d\n", d->irq);
return i == 8 ? -1 : 0;
}
static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_HI(d->irq);
unsigned long ip1;
if (flags == IRQ_TYPE_PROBE)
return 0;
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
if (flags & IRQ_TYPE_EDGE_RISING)
ip1 &= ~mask;
else
ip1 |= mask;
sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1);
return 0;
}
static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_HI(d->irq);
unsigned long we1;
we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
if (on)
we1 |= mask;
else
we1 &= ~mask;
sa1111_writel(we1, mapbase + SA1111_WAKEEN1);
return 0;
}
static struct irq_chip sa1111_high_chip = {
.name = "SA1111-h",
.irq_ack = sa1111_ack_irq,
.irq_mask = sa1111_mask_highirq,
.irq_unmask = sa1111_unmask_highirq,
.irq_retrigger = sa1111_retrigger_highirq,
.irq_set_type = sa1111_type_highirq,
.irq_set_wake = sa1111_wake_highirq,
};
static void sa1111_setup_irq(struct sa1111 *sachip)
{
void __iomem *irqbase = sachip->base + SA1111_INTC;
unsigned int irq;
/*
* We're guaranteed that this region hasn't been taken.
*/
request_mem_region(sachip->phys + SA1111_INTC, 512, "irq");
/* disable all IRQs */
sa1111_writel(0, irqbase + SA1111_INTEN0);
sa1111_writel(0, irqbase + SA1111_INTEN1);
sa1111_writel(0, irqbase + SA1111_WAKEEN0);
sa1111_writel(0, irqbase + SA1111_WAKEEN1);
/*
* detect on rising edge. Note: Feb 2001 Errata for SA1111
* specifies that S0ReadyInt and S1ReadyInt should be '1'.
*/
sa1111_writel(0, irqbase + SA1111_INTPOL0);
sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
irqbase + SA1111_INTPOL1);
/* clear all IRQs */
sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) {
irq_set_chip_and_handler(irq, &sa1111_low_chip,
handle_edge_irq);
irq_set_chip_data(irq, sachip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) {
irq_set_chip_and_handler(irq, &sa1111_high_chip,
handle_edge_irq);
irq_set_chip_data(irq, sachip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/*
* Register SA1111 interrupt
*/
irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
irq_set_handler_data(sachip->irq, sachip);
irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
}
/*
* Bring the SA1111 out of reset. This requires a set procedure:
* 1. nRESET asserted (by hardware)
* 2. CLK turned on from SA1110
* 3. nRESET deasserted
* 4. VCO turned on, PLL_BYPASS turned off
* 5. Wait lock time, then assert RCLKEn
* 6. PCR set to allow clocking of individual functions
*
* Until we've done this, the only registers we can access are:
* SBI_SKCR
* SBI_SMCR
* SBI_SKID
*/
static void sa1111_wake(struct sa1111 *sachip)
{
unsigned long flags, r;
spin_lock_irqsave(&sachip->lock, flags);
clk_enable(sachip->clk);
/*
* Turn VCO on, and disable PLL Bypass.
*/
r = sa1111_readl(sachip->base + SA1111_SKCR);
r &= ~SKCR_VCO_OFF;
sa1111_writel(r, sachip->base + SA1111_SKCR);
r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
sa1111_writel(r, sachip->base + SA1111_SKCR);
/*
* Wait lock time. SA1111 manual _doesn't_
* specify a figure for this! We choose 100us.
*/
udelay(100);
/*
* Enable RCLK. We also ensure that RDYEN is set.
*/
r |= SKCR_RCLKEN | SKCR_RDYEN;
sa1111_writel(r, sachip->base + SA1111_SKCR);
/*
* Wait 14 RCLK cycles for the chip to finish coming out
* of reset. (RCLK=24MHz). This is 590ns.
*/
udelay(1);
/*
* Ensure all clocks are initially off.
*/
sa1111_writel(0, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
#ifdef CONFIG_ARCH_SA1100
static u32 sa1111_dma_mask[] = {
~0,
~(1 << 20),
~(1 << 23),
~(1 << 24),
~(1 << 25),
~(1 << 20),
~(1 << 20),
0,
};
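/*
 * Illustrative note: sa1111_configure_smc() below indexes this table
 * with drac >> 2.  Index 2, for example, selects ~(1 << 23), i.e. a
 * DMA mask of 0xff7fffff; buffers with bit 23 set then fall outside
 * the device's addressable range and (with CONFIG_DMABOUNCE) a safe
 * bounce buffer is substituted instead.
 */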
/*
* Configure the SA1111 shared memory controller.
*/
void
sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
unsigned int cas_latency)
{
unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC);
if (cas_latency == 3)
smcr |= SMCR_CLAT;
sa1111_writel(smcr, sachip->base + SA1111_SMCR);
/*
* Now clear the bits in the DMA mask to work around the SA1111
* DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion
* Chip Specification Update, June 2000, Erratum #7).
*/
if (sachip->dev->dma_mask)
*sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2];
sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
}
#endif
#ifdef CONFIG_DMABOUNCE
/*
* According to the "Intel StrongARM SA-1111 Microprocessor Companion
* Chip Specification Update" (June 2000), erratum #7, there is a
* significant bug in the SA1111 SDRAM shared memory controller. If
* an access is made to a region of memory above 1MB relative to the bank base,
* it is important that address bit 10 _NOT_ be asserted. Depending
* on the configuration of the RAM, bit 10 may correspond to one
* of several different (processor-relative) address bits.
*
* This routine only identifies whether or not a given DMA address
* is susceptible to the bug.
*
* This should only get called for sa1111_device types due to the
* way we configure our device dma_masks.
*/
static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
/*
* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
* User's Guide" mentions that jumpers R51 and R52 control the
* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
* SDRAM bank 1 on Neponset). The default configuration selects
* Assabet, so any address in bank 1 is necessarily invalid.
*/
return (machine_is_assabet() || machine_is_pfs168()) &&
(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
}
#endif
static void sa1111_dev_release(struct device *_dev)
{
struct sa1111_dev *dev = SA1111_DEV(_dev);
release_resource(&dev->res);
kfree(dev);
}
static int
sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
struct sa1111_dev_info *info)
{
struct sa1111_dev *dev;
int ret;
dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto out;
}
dev_set_name(&dev->dev, "%4.4lx", info->offset);
dev->devid = info->devid;
dev->dev.parent = sachip->dev;
dev->dev.bus = &sa1111_bus_type;
dev->dev.release = sa1111_dev_release;
dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask;
dev->res.start = sachip->phys + info->offset;
dev->res.end = dev->res.start + 511;
dev->res.name = dev_name(&dev->dev);
dev->res.flags = IORESOURCE_MEM;
dev->mapbase = sachip->base + info->offset;
dev->skpcr_mask = info->skpcr_mask;
memmove(dev->irq, info->irq, sizeof(dev->irq));
ret = request_resource(parent, &dev->res);
if (ret) {
printk("SA1111: failed to allocate resource for %s\n",
dev->res.name);
dev_set_name(&dev->dev, NULL);
kfree(dev);
goto out;
}
ret = device_register(&dev->dev);
if (ret) {
release_resource(&dev->res);
kfree(dev);
goto out;
}
#ifdef CONFIG_DMABOUNCE
/*
* If the parent device has a DMA mask associated with it,
* propagate it down to the children.
*/
if (sachip->dev->dma_mask) {
dev->dma_mask = *sachip->dev->dma_mask;
dev->dev.dma_mask = &dev->dma_mask;
if (dev->dma_mask != 0xffffffffUL) {
ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
sa1111_needs_bounce);
if (ret) {
dev_err(&dev->dev, "SA1111: Failed to register"
" with dmabounce\n");
device_unregister(&dev->dev);
}
}
}
#endif
out:
return ret;
}
/**
* sa1111_probe - probe for a single SA1111 chip.
* @phys_addr: physical address of device.
*
* Probe for a SA1111 chip. This must be called
* before any other SA1111-specific code.
*
* Returns:
* %-ENODEV device not found.
* %-EBUSY physical address already marked in-use.
* %0 successful.
*/
static int __devinit
__sa1111_probe(struct device *me, struct resource *mem, int irq)
{
struct sa1111 *sachip;
unsigned long id;
unsigned int has_devs;
int i, ret = -ENODEV;
sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL);
if (!sachip)
return -ENOMEM;
sachip->clk = clk_get(me, "SA1111_CLK");
if (IS_ERR(sachip->clk)) {
ret = PTR_ERR(sachip->clk);
goto err_free;
}
spin_lock_init(&sachip->lock);
sachip->dev = me;
dev_set_drvdata(sachip->dev, sachip);
sachip->phys = mem->start;
sachip->irq = irq;
/*
* Map the whole region. This also maps the
* registers for our children.
*/
sachip->base = ioremap(mem->start, PAGE_SIZE * 2);
if (!sachip->base) {
ret = -ENOMEM;
goto err_clkput;
}
/*
* Probe for the chip. Only touch the SBI registers.
*/
id = sa1111_readl(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id);
ret = -ENODEV;
goto err_unmap;
}
printk(KERN_INFO "SA1111 Microprocessor Companion Chip: "
"silicon revision %lx, metal revision %lx\n",
(id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK));
/*
* We found it. Wake the chip up, and initialise.
*/
sa1111_wake(sachip);
#ifdef CONFIG_ARCH_SA1100
{
unsigned int val;
/*
* The SDRAM configuration of the SA1110 and the SA1111 must
* match. This is very important to ensure that SA1111 accesses
* don't corrupt the SDRAM. Note that this ungates the SA1111's
* MBGNT signal, so we must have called sa1110_mb_disable()
* beforehand.
*/
sa1111_configure_smc(sachip, 1,
FExtr(MDCNFG, MDCNFG_SA1110_DRAC0),
FExtr(MDCNFG, MDCNFG_SA1110_TDL0));
/*
* We only need to turn on DCLK whenever we want to use the
* DMA. It can otherwise be held firmly in the off position.
* (currently, we always enable it.)
*/
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
/*
* Enable the SA1110 memory bus request and grant signals.
*/
sa1110_mb_enable();
}
#endif
/*
* The interrupt controller must be initialised before any
* other device to ensure that the interrupts are available.
*/
if (sachip->irq != NO_IRQ)
sa1111_setup_irq(sachip);
g_sa1111 = sachip;
has_devs = ~0;
if (machine_is_assabet() || machine_is_jornada720() ||
machine_is_badge4())
has_devs &= ~(1 << 4);
else
has_devs &= ~(1 << 1);
for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++)
if (has_devs & (1 << i))
sa1111_init_one_child(sachip, mem, &sa1111_devices[i]);
return 0;
err_unmap:
iounmap(sachip->base);
err_clkput:
clk_put(sachip->clk);
err_free:
kfree(sachip);
return ret;
}
static int sa1111_remove_one(struct device *dev, void *data)
{
device_unregister(dev);
return 0;
}
static void __sa1111_remove(struct sa1111 *sachip)
{
void __iomem *irqbase = sachip->base + SA1111_INTC;
device_for_each_child(sachip->dev, NULL, sa1111_remove_one);
/* disable all IRQs */
sa1111_writel(0, irqbase + SA1111_INTEN0);
sa1111_writel(0, irqbase + SA1111_INTEN1);
sa1111_writel(0, irqbase + SA1111_WAKEEN0);
sa1111_writel(0, irqbase + SA1111_WAKEEN1);
clk_disable(sachip->clk);
if (sachip->irq != NO_IRQ) {
irq_set_chained_handler(sachip->irq, NULL);
irq_set_handler_data(sachip->irq, NULL);
release_mem_region(sachip->phys + SA1111_INTC, 512);
}
iounmap(sachip->base);
clk_put(sachip->clk);
kfree(sachip);
}
struct sa1111_save_data {
unsigned int skcr;
unsigned int skpcr;
unsigned int skcdr;
unsigned char skaud;
unsigned char skpwm0;
unsigned char skpwm1;
/*
* Interrupt controller
*/
unsigned int intpol0;
unsigned int intpol1;
unsigned int inten0;
unsigned int inten1;
unsigned int wakepol0;
unsigned int wakepol1;
unsigned int wakeen0;
unsigned int wakeen1;
};
#ifdef CONFIG_PM
static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags;
unsigned int val;
void __iomem *base;
save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL);
if (!save)
return -ENOMEM;
sachip->saved_state = save;
spin_lock_irqsave(&sachip->lock, flags);
/*
* Save state.
*/
base = sachip->base;
save->skcr = sa1111_readl(base + SA1111_SKCR);
save->skpcr = sa1111_readl(base + SA1111_SKPCR);
save->skcdr = sa1111_readl(base + SA1111_SKCDR);
save->skaud = sa1111_readl(base + SA1111_SKAUD);
save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0);
save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
save->intpol0 = sa1111_readl(base + SA1111_INTPOL0);
save->intpol1 = sa1111_readl(base + SA1111_INTPOL1);
save->inten0 = sa1111_readl(base + SA1111_INTEN0);
save->inten1 = sa1111_readl(base + SA1111_INTEN1);
save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0);
save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1);
save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0);
save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1);
/*
* Disable.
*/
val = sa1111_readl(sachip->base + SA1111_SKCR);
sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
sa1111_writel(0, sachip->base + SA1111_SKPWM0);
sa1111_writel(0, sachip->base + SA1111_SKPWM1);
clk_disable(sachip->clk);
spin_unlock_irqrestore(&sachip->lock, flags);
return 0;
}
/*
* sa1111_resume - Restore the SA1111 device state.
* @dev: device to restore
*
* Restore the general state of the SA1111; clock control and
* interrupt controller. Other parts of the SA1111 must be
* restored by their respective drivers, and must be called
* via LDM after this function.
*/
static int sa1111_resume(struct platform_device *dev)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags, id;
void __iomem *base;
save = sachip->saved_state;
if (!save)
return 0;
/*
* Ensure that the SA1111 is still here.
* FIXME: shouldn't do this here.
*/
id = sa1111_readl(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
platform_set_drvdata(dev, NULL);
kfree(save);
return 0;
}
/*
* First of all, wake up the chip.
*/
sa1111_wake(sachip);
/*
* Only lock for write ops. Also, sa1111_wake must be called with
* released spinlock!
*/
spin_lock_irqsave(&sachip->lock, flags);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
base = sachip->base;
sa1111_writel(save->skcr, base + SA1111_SKCR);
sa1111_writel(save->skpcr, base + SA1111_SKPCR);
sa1111_writel(save->skcdr, base + SA1111_SKCDR);
sa1111_writel(save->skaud, base + SA1111_SKAUD);
sa1111_writel(save->skpwm0, base + SA1111_SKPWM0);
sa1111_writel(save->skpwm1, base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
sa1111_writel(save->intpol0, base + SA1111_INTPOL0);
sa1111_writel(save->intpol1, base + SA1111_INTPOL1);
sa1111_writel(save->inten0, base + SA1111_INTEN0);
sa1111_writel(save->inten1, base + SA1111_INTEN1);
sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0);
sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1);
sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0);
sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1);
spin_unlock_irqrestore(&sachip->lock, flags);
sachip->saved_state = NULL;
kfree(save);
return 0;
}
#else
#define sa1111_suspend NULL
#define sa1111_resume NULL
#endif
static int __devinit sa1111_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
return __sa1111_probe(&pdev->dev, mem, irq);
}
static int sa1111_remove(struct platform_device *pdev)
{
struct sa1111 *sachip = platform_get_drvdata(pdev);
if (sachip) {
#ifdef CONFIG_PM
kfree(sachip->saved_state);
sachip->saved_state = NULL;
#endif
__sa1111_remove(sachip);
platform_set_drvdata(pdev, NULL);
}
return 0;
}
/*
* Not sure if this should be on the system bus or not yet.
* We really want some way to register a system device at
* the per-machine level, and then have this driver pick
* up the registered devices.
*
* We also need to handle the SDRAM configuration for
* PXA250/SA1110 machine classes.
*/
static struct platform_driver sa1111_device_driver = {
.probe = sa1111_probe,
.remove = sa1111_remove,
.suspend = sa1111_suspend,
.resume = sa1111_resume,
.driver = {
.name = "sa1111",
},
};
/*
* Get the parent device driver (us) structure
* from a child function device
*/
static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev)
{
return (struct sa1111 *)dev_get_drvdata(sadev->dev.parent);
}
/*
* The bits in the opdiv field are non-linear.
*/
static unsigned char opdiv_table[] = { 1, 4, 2, 8 };
static unsigned int __sa1111_pll_clock(struct sa1111 *sachip)
{
unsigned int skcdr, fbdiv, ipdiv, opdiv;
skcdr = sa1111_readl(sachip->base + SA1111_SKCDR);
fbdiv = (skcdr & 0x007f) + 2;
ipdiv = ((skcdr & 0x0f80) >> 7) + 2;
opdiv = opdiv_table[(skcdr & 0x3000) >> 12];
return 3686400 * fbdiv / (ipdiv * opdiv);
}
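/*
 * Worked example (hypothetical register value): SKCDR = 0x0147 gives
 * fbdiv = 0x47 + 2 = 73, ipdiv = 2 + 2 = 4, opdiv = opdiv_table[0] = 1,
 * so the PLL runs at 3686400 * 73 / (4 * 1) = 67276800 Hz.
 */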
/**
* sa1111_pll_clock - return the current PLL clock frequency.
* @sadev: SA1111 function block
*
* BUG: we should look at SKCR. We also blindly believe that
* the chip is being fed with the 3.6864MHz clock.
*
* Returns the PLL clock in Hz.
*/
unsigned int sa1111_pll_clock(struct sa1111_dev *sadev)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
return __sa1111_pll_clock(sachip);
}
EXPORT_SYMBOL(sa1111_pll_clock);
/**
* sa1111_select_audio_mode - select I2S or AC link mode
* @sadev: SA1111 function block
* @mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S
*
* Frob the SKCR to select AC Link mode or I2S mode for
* the audio block.
*/
void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKCR);
if (mode == SA1111_AUDIO_I2S) {
val &= ~SKCR_SELAC;
} else {
val |= SKCR_SELAC;
}
sa1111_writel(val, sachip->base + SA1111_SKCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_select_audio_mode);
/**
* sa1111_set_audio_rate - set the audio sample rate
* @sadev: SA1111 SAC function block
* @rate: sample rate to select
*/
int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned int div;
if (sadev->devid != SA1111_DEVID_SAC)
return -EINVAL;
div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate;
if (div == 0)
div = 1;
if (div > 128)
div = 128;
sa1111_writel(div - 1, sachip->base + SA1111_SKAUD);
return 0;
}
EXPORT_SYMBOL(sa1111_set_audio_rate);
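/*
 * Worked example (hypothetical numbers): with a 67276800 Hz PLL clock,
 * the sample clock base is 67276800 / 256 = 262800 Hz.  Requesting
 * rate = 44100 gives div = (262800 + 22050) / 44100 = 6, SKAUD is
 * written with 5, and sa1111_get_audio_rate() reads back
 * 262800 / 6 = 43800 Hz.
 */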
/**
* sa1111_get_audio_rate - get the audio sample rate
* @sadev: SA1111 SAC function block device
*/
int sa1111_get_audio_rate(struct sa1111_dev *sadev)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long div;
if (sadev->devid != SA1111_DEVID_SAC)
return -EINVAL;
div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1;
return __sa1111_pll_clock(sachip) / (256 * div);
}
EXPORT_SYMBOL(sa1111_get_audio_rate);
void sa1111_set_io_dir(struct sa1111_dev *sadev,
unsigned int bits, unsigned int dir,
unsigned int sleep_dir)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
void __iomem *gpio = sachip->base + SA1111_GPIO;
#define MODIFY_BITS(port, mask, dir) \
if (mask) { \
val = sa1111_readl(port); \
val &= ~(mask); \
val |= (dir) & (mask); \
sa1111_writel(val, port); \
}
spin_lock_irqsave(&sachip->lock, flags);
MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir);
MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8);
MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16);
MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir);
MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8);
MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_io_dir);
void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
void __iomem *gpio = sachip->base + SA1111_GPIO;
spin_lock_irqsave(&sachip->lock, flags);
MODIFY_BITS(gpio + SA1111_GPIO_PADWR, bits & 15, v);
MODIFY_BITS(gpio + SA1111_GPIO_PBDWR, (bits >> 8) & 255, v >> 8);
MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_io);
void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
void __iomem *gpio = sachip->base + SA1111_GPIO;
spin_lock_irqsave(&sachip->lock, flags);
MODIFY_BITS(gpio + SA1111_GPIO_PASSR, bits & 15, v);
MODIFY_BITS(gpio + SA1111_GPIO_PBSSR, (bits >> 8) & 255, v >> 8);
MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_sleep_io);
/*
* Individual device operations.
*/
/**
* sa1111_enable_device - enable an on-chip SA1111 function block
* @sadev: SA1111 function block device to enable
*/
void sa1111_enable_device(struct sa1111_dev *sadev)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_enable_device);
/**
* sa1111_disable_device - disable an on-chip SA1111 function block
* @sadev: SA1111 function block device to disable
*/
void sa1111_disable_device(struct sa1111_dev *sadev)
{
struct sa1111 *sachip = sa1111_chip_driver(sadev);
unsigned long flags;
unsigned int val;
spin_lock_irqsave(&sachip->lock, flags);
val = sa1111_readl(sachip->base + SA1111_SKPCR);
sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_disable_device);
/*
* SA1111 "Register Access Bus."
*
* We model this as a regular bus type, and hang devices directly
* off this.
*/
static int sa1111_match(struct device *_dev, struct device_driver *_drv)
{
struct sa1111_dev *dev = SA1111_DEV(_dev);
struct sa1111_driver *drv = SA1111_DRV(_drv);
return dev->devid == drv->devid;
}
static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
{
struct sa1111_dev *sadev = SA1111_DEV(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
if (drv && drv->suspend)
ret = drv->suspend(sadev, state);
return ret;
}
static int sa1111_bus_resume(struct device *dev)
{
struct sa1111_dev *sadev = SA1111_DEV(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
if (drv && drv->resume)
ret = drv->resume(sadev);
return ret;
}
static int sa1111_bus_probe(struct device *dev)
{
struct sa1111_dev *sadev = SA1111_DEV(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = -ENODEV;
if (drv->probe)
ret = drv->probe(sadev);
return ret;
}
static int sa1111_bus_remove(struct device *dev)
{
struct sa1111_dev *sadev = SA1111_DEV(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
int ret = 0;
if (drv->remove)
ret = drv->remove(sadev);
return ret;
}
struct bus_type sa1111_bus_type = {
.name = "sa1111-rab",
.match = sa1111_match,
.probe = sa1111_bus_probe,
.remove = sa1111_bus_remove,
.suspend = sa1111_bus_suspend,
.resume = sa1111_bus_resume,
};
EXPORT_SYMBOL(sa1111_bus_type);
int sa1111_driver_register(struct sa1111_driver *driver)
{
driver->drv.bus = &sa1111_bus_type;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(sa1111_driver_register);
void sa1111_driver_unregister(struct sa1111_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(sa1111_driver_unregister);
static int __init sa1111_init(void)
{
int ret = bus_register(&sa1111_bus_type);
if (ret == 0)
platform_driver_register(&sa1111_device_driver);
return ret;
}
static void __exit sa1111_exit(void)
{
platform_driver_unregister(&sa1111_device_driver);
bus_unregister(&sa1111_bus_type);
}
subsys_initcall(sa1111_init);
module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
drhonk/Bali-SGS4G | drivers/isdn/gigaset/asyncdata.c | 507 | 15423 | /*
* Common data handling layer for ser_gigaset and usb_gigaset
*
* Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
* Hansjoerg Lipp <hjlipp@web.de>,
* Stefan Eilers.
*
* =====================================================================
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* =====================================================================
*/
#include "gigaset.h"
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
/* check if byte must be stuffed/escaped
* I'm not sure which data should be encoded.
* Therefore I will go the hard way and escape every value
* less than 0x20, the flag sequence and the control escape char.
*/
static inline int muststuff(unsigned char c)
{
if (c < PPP_TRANS) return 1;
if (c == PPP_FLAG) return 1;
if (c == PPP_ESCAPE) return 1;
/* other possible candidates: */
/* 0x91: XON with parity set */
/* 0x93: XOFF with parity set */
return 0;
}
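/*
 * Illustrative note: a byte that muststuff() flags is transmitted as
 * PPP_ESCAPE followed by the byte XORed with PPP_TRANS, so 0x7e goes
 * out as 0x7d 0x5e; hdlc_loop() below undoes this with c ^= PPP_TRANS.
 */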
/* == data input =========================================================== */
/* process a block of received bytes in command mode (modem response)
* Return value:
* number of processed bytes
*/
static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
struct inbuf_t *inbuf)
{
struct cardstate *cs = inbuf->cs;
unsigned cbytes = cs->cbytes;
int inputstate = inbuf->inputstate;
int startbytes = numbytes;
for (;;) {
cs->respdata[cbytes] = c;
if (c == 10 || c == 13) {
gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
__func__, cbytes);
cs->cbytes = cbytes;
gigaset_handle_modem_response(cs); /* can change
cs->dle */
cbytes = 0;
if (cs->dle &&
!(inputstate & INS_DLE_command)) {
inputstate &= ~INS_command;
break;
}
} else {
/* advance in line buffer, checking for overflow */
if (cbytes < MAX_RESP_SIZE - 1)
cbytes++;
else
dev_warn(cs->dev, "response too large\n");
}
if (!numbytes)
break;
c = *src++;
--numbytes;
if (c == DLE_FLAG &&
(cs->dle || inputstate & INS_DLE_command)) {
inputstate |= INS_DLE_char;
break;
}
}
cs->cbytes = cbytes;
inbuf->inputstate = inputstate;
return startbytes - numbytes;
}
/* process a block of received bytes in lock mode (tty i/f)
* Return value:
* number of processed bytes
*/
static inline int lock_loop(unsigned char *src, int numbytes,
struct inbuf_t *inbuf)
{
struct cardstate *cs = inbuf->cs;
gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
numbytes, src);
gigaset_if_receive(cs, src, numbytes);
return numbytes;
}
/* process a block of received bytes in HDLC data mode
* Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
* When a frame is complete, check the FCS and pass valid frames to the LL.
* If DLE is encountered, return immediately to let the caller handle it.
* Return value:
* number of processed bytes
* numbytes (all bytes processed) on error --FIXME
*/
static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
struct inbuf_t *inbuf)
{
struct cardstate *cs = inbuf->cs;
struct bc_state *bcs = inbuf->bcs;
int inputstate = bcs->inputstate;
__u16 fcs = bcs->fcs;
struct sk_buff *skb = bcs->skb;
unsigned char error;
struct sk_buff *compskb;
int startbytes = numbytes;
int l;
if (unlikely(inputstate & INS_byte_stuff)) {
inputstate &= ~INS_byte_stuff;
goto byte_stuff;
}
for (;;) {
if (unlikely(c == PPP_ESCAPE)) {
if (unlikely(!numbytes)) {
inputstate |= INS_byte_stuff;
break;
}
c = *src++;
--numbytes;
if (unlikely(c == DLE_FLAG &&
(cs->dle ||
inbuf->inputstate & INS_DLE_command))) {
inbuf->inputstate |= INS_DLE_char;
inputstate |= INS_byte_stuff;
break;
}
byte_stuff:
c ^= PPP_TRANS;
if (unlikely(!muststuff(c)))
gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
} else if (unlikely(c == PPP_FLAG)) {
if (unlikely(inputstate & INS_skip_frame)) {
#ifdef CONFIG_GIGASET_DEBUG
if (!(inputstate & INS_have_data)) { /* 7E 7E */
++bcs->emptycount;
} else
gig_dbg(DEBUG_HDLC,
"7e----------------------------");
#endif
/* end of frame */
error = 1;
gigaset_rcv_error(NULL, cs, bcs);
} else if (!(inputstate & INS_have_data)) { /* 7E 7E */
#ifdef CONFIG_GIGASET_DEBUG
++bcs->emptycount;
#endif
break;
} else {
gig_dbg(DEBUG_HDLC,
"7e----------------------------");
/* end of frame */
error = 0;
if (unlikely(fcs != PPP_GOODFCS)) {
dev_err(cs->dev,
"Checksum failed, %u bytes corrupted!\n",
skb->len);
compskb = NULL;
gigaset_rcv_error(compskb, cs, bcs);
error = 1;
} else {
if (likely((l = skb->len) > 2)) {
skb->tail -= 2;
skb->len -= 2;
} else {
dev_kfree_skb(skb);
skb = NULL;
inputstate |= INS_skip_frame;
if (l == 1) {
dev_err(cs->dev,
"invalid packet size (1)!\n");
error = 1;
gigaset_rcv_error(NULL,
cs, bcs);
}
}
if (likely(!(error ||
(inputstate &
INS_skip_frame)))) {
gigaset_rcv_skb(skb, cs, bcs);
}
}
}
if (unlikely(error))
if (skb)
dev_kfree_skb(skb);
fcs = PPP_INITFCS;
inputstate &= ~(INS_have_data | INS_skip_frame);
if (unlikely(bcs->ignore)) {
inputstate |= INS_skip_frame;
skb = NULL;
} else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
skb_reserve(skb, HW_HDR_LEN);
} else {
dev_warn(cs->dev,
"could not allocate new skb\n");
inputstate |= INS_skip_frame;
}
break;
} else if (unlikely(muststuff(c))) {
/* Should not happen. Possible after ZDLE=1<CR><LF>. */
gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
}
/* add character */
#ifdef CONFIG_GIGASET_DEBUG
if (unlikely(!(inputstate & INS_have_data))) {
gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
bcs->emptycount);
bcs->emptycount = 0;
}
#endif
inputstate |= INS_have_data;
if (likely(!(inputstate & INS_skip_frame))) {
if (unlikely(skb->len == SBUFSIZE)) {
dev_warn(cs->dev, "received packet too long\n");
dev_kfree_skb_any(skb);
skb = NULL;
inputstate |= INS_skip_frame;
break;
}
*__skb_put(skb, 1) = c;
fcs = crc_ccitt_byte(fcs, c);
}
if (unlikely(!numbytes))
break;
c = *src++;
--numbytes;
if (unlikely(c == DLE_FLAG &&
(cs->dle ||
inbuf->inputstate & INS_DLE_command))) {
inbuf->inputstate |= INS_DLE_char;
break;
}
}
bcs->inputstate = inputstate;
bcs->fcs = fcs;
bcs->skb = skb;
return startbytes - numbytes;
}
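/* Illustrative sketch, not part of the original driver: the FCS test that
 * hdlc_loop() performs incrementally with crc_ccitt_byte(). Running the
 * CRC over the de-stuffed payload plus the two trailing FCS octets of an
 * intact frame yields PPP_GOODFCS; PPP_INITFCS and PPP_GOODFCS are assumed
 * to have their usual values (0xffff / 0xf0b8).
 */
static inline int frame_fcs_ok_example(const unsigned char *buf, size_t len)
{
/* len counts payload plus the two FCS octets, flags already removed */
return len >= 2 && crc_ccitt(PPP_INITFCS, buf, len) == PPP_GOODFCS;
}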
/* process a block of received bytes in transparent data mode
* Invert bytes, undoing byte stuffing and watching for DLE escapes.
* If DLE is encountered, return immediately to let the caller handle it.
* Return value:
* number of processed bytes
* numbytes (all bytes processed) on error --FIXME
*/
static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
struct inbuf_t *inbuf)
{
struct cardstate *cs = inbuf->cs;
struct bc_state *bcs = inbuf->bcs;
int inputstate = bcs->inputstate;
struct sk_buff *skb = bcs->skb;
int startbytes = numbytes;
for (;;) {
/* add character */
inputstate |= INS_have_data;
if (likely(!(inputstate & INS_skip_frame))) {
if (unlikely(skb->len == SBUFSIZE)) {
//FIXME just pass skb up and allocate a new one
dev_warn(cs->dev, "received packet too long\n");
dev_kfree_skb_any(skb);
skb = NULL;
inputstate |= INS_skip_frame;
break;
}
*__skb_put(skb, 1) = bitrev8(c);
}
if (unlikely(!numbytes))
break;
c = *src++;
--numbytes;
if (unlikely(c == DLE_FLAG &&
(cs->dle ||
inbuf->inputstate & INS_DLE_command))) {
inbuf->inputstate |= INS_DLE_char;
break;
}
}
/* pass data up */
if (likely(inputstate & INS_have_data)) {
if (likely(!(inputstate & INS_skip_frame))) {
gigaset_rcv_skb(skb, cs, bcs);
}
inputstate &= ~(INS_have_data | INS_skip_frame);
if (unlikely(bcs->ignore)) {
inputstate |= INS_skip_frame;
skb = NULL;
} else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN))
!= NULL)) {
skb_reserve(skb, HW_HDR_LEN);
} else {
dev_warn(cs->dev, "could not allocate new skb\n");
inputstate |= INS_skip_frame;
}
}
bcs->inputstate = inputstate;
bcs->skb = skb;
return startbytes - numbytes;
}
/**
* gigaset_m10x_input() - process a block of data received from the device
* @inbuf: received data and device descriptor structure.
*
* Called by hardware module {ser,usb}_gigaset with a block of received
* bytes. Separates the bytes received over the serial data channel into
* user data and command replies (locked/unlocked) according to the
* current state of the interface.
*/
void gigaset_m10x_input(struct inbuf_t *inbuf)
{
struct cardstate *cs;
unsigned tail, head, numbytes;
unsigned char *src, c;
int procbytes;
head = inbuf->head;
tail = inbuf->tail;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
if (head != tail) {
cs = inbuf->cs;
src = inbuf->data + head;
numbytes = (head > tail ? RBUFSIZE : tail) - head;
gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
while (numbytes) {
if (cs->mstate == MS_LOCKED) {
procbytes = lock_loop(src, numbytes, inbuf);
src += procbytes;
numbytes -= procbytes;
} else {
c = *src++;
--numbytes;
if (c == DLE_FLAG && (cs->dle ||
inbuf->inputstate & INS_DLE_command)) {
if (!(inbuf->inputstate & INS_DLE_char)) {
inbuf->inputstate |= INS_DLE_char;
goto nextbyte;
}
/* <DLE> <DLE> => <DLE> in data stream */
inbuf->inputstate &= ~INS_DLE_char;
}
if (!(inbuf->inputstate & INS_DLE_char)) {
/* FIXME use function pointers? */
if (inbuf->inputstate & INS_command)
procbytes = cmd_loop(c, src, numbytes, inbuf);
else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
procbytes = hdlc_loop(c, src, numbytes, inbuf);
else
procbytes = iraw_loop(c, src, numbytes, inbuf);
src += procbytes;
numbytes -= procbytes;
} else { /* DLE char */
inbuf->inputstate &= ~INS_DLE_char;
switch (c) {
case 'X': /* beginning of command */
if (inbuf->inputstate & INS_command)
dev_warn(cs->dev,
"received <DLE> 'X' in command mode\n");
inbuf->inputstate |=
INS_command | INS_DLE_command;
break;
case '.': /*end of command*/
if (!(inbuf->inputstate & INS_command))
dev_warn(cs->dev,
"received <DLE> '.' in hdlc mode\n");
inbuf->inputstate &= cs->dle ?
~(INS_DLE_command|INS_command)
: ~INS_DLE_command;
break;
//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */
default:
dev_err(cs->dev,
"received 0x10 0x%02x!\n",
(int) c);
/* FIXME: reset driver?? */
}
}
}
nextbyte:
if (!numbytes) {
/* end of buffer, check for wrap */
if (head > tail) {
head = 0;
src = inbuf->data;
numbytes = tail;
} else {
head = tail;
break;
}
}
}
gig_dbg(DEBUG_INTR, "setting head to %u", head);
inbuf->head = head;
}
}
EXPORT_SYMBOL_GPL(gigaset_m10x_input);
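/* Illustrative walk-through, not part of the original driver, assuming
 * DLE_FLAG is 0x10: while DLE mode is active, a stream such as
 * ... 0x10 'X' O K <CR> <LF> 0x10 '.' <payload> ...
 * is split by the loop above into a command response ("OK") fed to
 * cmd_loop() and payload fed to hdlc_loop() or iraw_loop(), while the
 * sequence 0x10 0x10 passes a single literal 0x10 through to the data path.
 */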
/* == data output ========================================================== */
/* Encoding of a PPP packet into an octet stuffed HDLC frame
* with FCS, opening and closing flags.
* parameters:
* skb skb containing original packet (freed upon return)
* head number of headroom bytes to allocate in result skb
* tail number of tailroom bytes to allocate in result skb
* Return value:
* pointer to newly allocated skb containing the result frame
*/
static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
{
struct sk_buff *hdlc_skb;
__u16 fcs;
unsigned char c;
unsigned char *cp;
int len;
unsigned int stuf_cnt;
stuf_cnt = 0;
fcs = PPP_INITFCS;
cp = skb->data;
len = skb->len;
while (len--) {
if (muststuff(*cp))
stuf_cnt++;
fcs = crc_ccitt_byte(fcs, *cp++);
}
fcs ^= 0xffff; /* complement */
/* size of new buffer: original size + number of stuffing bytes
* + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
*/
hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head);
if (!hdlc_skb) {
dev_kfree_skb(skb);
return NULL;
}
skb_reserve(hdlc_skb, head);
/* Copy acknowledge request into new skb */
memcpy(hdlc_skb->head, skb->head, 2);
/* Add flag sequence in front of everything.. */
*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
/* Perform byte stuffing while copying data. */
while (skb->len--) {
if (muststuff(*skb->data)) {
*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
*(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
} else
*(skb_put(hdlc_skb, 1)) = *skb->data++;
}
/* Finally add FCS (byte stuffed) and flag sequence */
c = (fcs & 0x00ff); /* least significant byte first */
if (muststuff(c)) {
*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
c ^= PPP_TRANS;
}
*(skb_put(hdlc_skb, 1)) = c;
c = ((fcs >> 8) & 0x00ff);
if (muststuff(c)) {
*(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
c ^= PPP_TRANS;
}
*(skb_put(hdlc_skb, 1)) = c;
*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
dev_kfree_skb(skb);
return hdlc_skb;
}
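/* Worked size example, not part of the original driver: for a 4-byte
 * payload { 0x7e, 0x01, 0x02, 0x03 } every byte satisfies muststuff(),
 * so stuf_cnt == 4 and HDLC_Encode() allocates
 * skb->len + stuf_cnt + 6 = 4 + 4 + 6 = 14 bytes (plus head/tail room):
 * 1 opening flag + 8 stuffed payload octets + at most 4 octets for the
 * byte-stuffed FCS + 1 closing flag. The "+ 6" therefore covers the worst
 * case for FCS and flags, not the exact frame length.
 */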
/* Encoding of a raw packet into an octet stuffed bit inverted frame
* parameters:
* skb skb containing original packet (freed upon return)
* head number of headroom bytes to allocate in result skb
* tail number of tailroom bytes to allocate in result skb
* Return value:
* pointer to newly allocated skb containing the result frame
*/
static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
{
struct sk_buff *iraw_skb;
unsigned char c;
unsigned char *cp;
int len;
/* worst case: every byte must be stuffed */
iraw_skb = dev_alloc_skb(2*skb->len + tail + head);
if (!iraw_skb) {
dev_kfree_skb(skb);
return NULL;
}
skb_reserve(iraw_skb, head);
cp = skb->data;
len = skb->len;
while (len--) {
c = bitrev8(*cp++);
if (c == DLE_FLAG)
*(skb_put(iraw_skb, 1)) = c;
*(skb_put(iraw_skb, 1)) = c;
}
dev_kfree_skb(skb);
return iraw_skb;
}
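/* Illustrative example, not part of the original driver: iraw_encode()
 * bit-reverses each octet and doubles any result that collides with
 * DLE_FLAG, so iraw_loop() on the receive side only has to bit-reverse
 * again (bitrev8() is its own inverse). Assuming DLE_FLAG is 0x10:
 * 0x55 -> bitrev8(0x55) == 0xaa (sent as one octet)
 * 0x08 -> bitrev8(0x08) == 0x10 == DLE_FLAG (sent as 0x10 0x10)
 */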
/**
* gigaset_m10x_send_skb() - queue an skb for sending
* @bcs: B channel descriptor structure.
* @skb: data to send.
*
* Called by i4l.c to encode and queue an skb for sending, and start
* transmission if necessary.
*
* Return value:
* number of bytes accepted for sending (skb->len) if ok,
* error code < 0 (eg. -ENOMEM) on error
*/
int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
{
unsigned len = skb->len;
unsigned long flags;
if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
skb = HDLC_Encode(skb, HW_HDR_LEN, 0);
else
skb = iraw_encode(skb, HW_HDR_LEN, 0);
if (!skb) {
dev_err(bcs->cs->dev,
"unable to allocate memory for encoding!\n");
return -ENOMEM;
}
skb_queue_tail(&bcs->squeue, skb);
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->cs->connected)
tasklet_schedule(&bcs->cs->write_tasklet);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
return len; /* ok so far */
}
EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
| gpl-2.0 |
getitnowmarketing/Gz-One-Commando | arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 507 | 11773 | /*
* SH7366 Setup
*
* Copyright (C) 2008 Renesas Solutions
*
* Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 96,
.end = 99,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
};
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
};
static struct resource usb_host_resources[] = {
[0] = {
.start = 0xa4d80000,
.end = 0xa4d800ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 65,
.end = 65,
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device usb_host_device = {
.name = "r8a66597_hcd",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(usb_host_resources),
.resource = usb_host_resources,
};
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
.irq = 60,
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe902807,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu0_platform_data = {
.name = "VEU",
.version = "0",
.irq = 54,
};
static struct resource veu0_resources[] = {
[0] = {
.name = "VEU(1)",
.start = 0xfe920000,
.end = 0xfe9200b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu0_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu0_platform_data,
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
};
static struct uio_info veu1_platform_data = {
.name = "VEU",
.version = "0",
.irq = 27,
};
static struct resource veu1_resources[] = {
[0] = {
.name = "VEU(2)",
.start = 0xfe924000,
.end = 0xfe9240b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu1_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &veu1_platform_data,
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
};
static struct sh_timer_config cmt_platform_data = {
.name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
.clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
.name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 104,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cmt_device = {
.name = "sh_cmt",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
.clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
.name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 16,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu0_device = {
.name = "sh_tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
.clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
.name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 17,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu1_device = {
.name = "sh_tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
.clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
.name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 18,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu2_device = {
.name = "sh_tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct plat_sci_port sci_platform_data[] = {
{
.mapbase = 0xffe00000,
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
.clk = "scif0",
}, {
.flags = 0,
}
};
static struct platform_device sci_device = {
.name = "sh-sci",
.id = -1,
.dev = {
.platform_data = sci_platform_data,
},
};
static struct platform_device *sh7366_devices[] __initdata = {
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic_device,
&sci_device,
&usb_host_device,
&vpu_device,
&veu0_device,
&veu1_device,
};
static int __init sh7366_devices_setup(void)
{
platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
return platform_add_devices(sh7366_devices,
ARRAY_SIZE(sh7366_devices));
}
arch_initcall(sh7366_devices_setup);
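/* Note, not part of the original file: platform_resource_setup_memory()
 * is assumed to allocate a contiguous DMA buffer and write its address
 * into the trailing "place holder" resource of each UIO device above;
 * 2 << 20 is 2 MiB per device (VPU, VEU0, VEU1).
 */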
static struct platform_device *sh7366_early_devices[] __initdata = {
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
void __init plat_early_device_setup(void)
{
early_platform_add_devices(sh7366_early_devices,
ARRAY_SIZE(sh7366_early_devices));
}
enum {
UNUSED=0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
ICB,
DMAC0, DMAC1, DMAC2, DMAC3,
VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
MFI, VPU, USB,
MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
DMAC4, DMAC5, DMAC_DADERR,
SCIF, SCIFA1, SCIFA2,
DENC, MSIOF,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
SDHI0, SDHI1, SDHI2, SDHI3,
CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
VEU2, LCDC,
/* interrupt groups */
DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(ICB, 0x700),
INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
INTC_VECT(MMC_MMC3I, 0xb40),
INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
INTC_VECT(DMAC_DADERR, 0xbc0),
INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
INTC_VECT(SCIFA2, 0xc40),
INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, 0, 0, 0, MFI } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ 0, 0, 0, ICB } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, 0, 0, 0, 0, 0, MSIOF } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0, 0, 0, 0, 0, 0, 0, TSIF } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { } },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
mask_registers, prio_registers, sense_registers,
ack_registers);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
void __init plat_mem_setup(void)
{
/* TODO: Register Node 1 */
}
| gpl-2.0 |
mturquette/linux-omap | arch/mips/pci/ops-bonito64.c | 507 | 4395 | /*
* Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* MIPS boards specific PCI support.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mips-boards/bonito64.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#ifdef CONFIG_LEMOTE_FULOONG2E
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(BONITO_PCICFG_BASE | (offset))
#define ID_SEL_BEGIN 11
#else
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
#define ID_SEL_BEGIN 10
#endif
#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
static int bonito64_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus,
unsigned int devfn, int where,
u32 * data)
{
u32 busnum = bus->number;
u32 addr, type;
u32 dummy;
void *addrp;
int device = PCI_SLOT(devfn);
int function = PCI_FUNC(devfn);
int reg = where & ~3;
if (busnum == 0) {
/* Type 0 configuration for onboard PCI bus */
if (device > MAX_DEV_NUM)
return -1;
addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
type = 0;
} else {
/* Type 1 configuration for offboard PCI bus */
addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
type = 0x10000;
}
/* Clear aborts */
BONITO_PCICMD |= BONITO_PCICMD_MABORT_CLR | BONITO_PCICMD_MTABORT_CLR;
BONITO_PCIMAP_CFG = (addr >> 16) | type;
/* Flush Bonito register block */
dummy = BONITO_PCIMAP_CFG;
mmiowb();
addrp = CFG_SPACE_REG(addr & 0xffff);
if (access_type == PCI_ACCESS_WRITE) {
writel(cpu_to_le32(*data), addrp);
#ifndef CONFIG_LEMOTE_FULOONG2E
/* Wait till done */
while (BONITO_PCIMSTAT & 0xF);
#endif
} else {
*data = le32_to_cpu(readl(addrp));
}
/* Detect Master/Target abort */
if (BONITO_PCICMD & (BONITO_PCICMD_MABORT_CLR |
BONITO_PCICMD_MTABORT_CLR)) {
/* Error occurred */
/* Clear bits */
BONITO_PCICMD |= (BONITO_PCICMD_MABORT_CLR |
BONITO_PCICMD_MTABORT_CLR);
return -1;
}
return 0;
}
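/* Worked example, not part of the original file, using the non-Fuloong
 * layout where ID_SEL_BEGIN is 10: a type 0 cycle to bus 0, device 3,
 * function 1, register 0x10 gives
 * addr = (1 << (3 + 10)) | (1 << 8) | 0x10 = 0x2110, type = 0,
 * so BONITO_PCIMAP_CFG is loaded with (0x2110 >> 16) | 0 == 0 and the
 * access goes through CFG_SPACE_REG(0x2110 & 0xffff). For an offboard bus
 * the bus and device numbers are packed as (busnum << 16) | (device << 11)
 * instead and type is 0x10000.
 */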
/*
* We can't address 8 and 16 bit words directly. Instead we have to
* read/write a 32bit word and mask/modify the data we actually want.
*/
static int bonito64_pcibios_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * val)
{
u32 data = 0;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
&data))
return -1;
if (size == 1)
*val = (data >> ((where & 3) << 3)) & 0xff;
else if (size == 2)
*val = (data >> ((where & 3) << 3)) & 0xffff;
else
*val = data;
return PCIBIOS_SUCCESSFUL;
}
static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
u32 data = 0;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
data = val;
else {
if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
where, &data))
return -1;
if (size == 1)
data = (data & ~(0xff << ((where & 3) << 3))) |
(val << ((where & 3) << 3));
else if (size == 2)
data = (data & ~(0xffff << ((where & 3) << 3))) |
(val << ((where & 3) << 3));
}
if (bonito64_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
&data))
return -1;
return PCIBIOS_SUCCESSFUL;
}
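/* Illustrative sketch, not part of the original file: the mask/shift
 * arithmetic used above for sub-word config accesses. A 16-bit write of
 * 0xabcd to offset 0x06 lands in the upper half of the aligned 32-bit
 * word at 0x04: shift = (0x06 & 3) << 3 = 16, so
 * data = (data & ~(0xffff << 16)) | (0xabcd << 16).
 */
static inline u32 merge_config_halfword_example(u32 data, int where, u16 val)
{
int shift = (where & 3) << 3; /* 0 or 16 for an aligned halfword */
return (data & ~((u32)0xffff << shift)) | ((u32)val << shift);
}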
struct pci_ops bonito64_pci_ops = {
.read = bonito64_pcibios_read,
.write = bonito64_pcibios_write
};
| gpl-2.0 |
vaginessa/Googy-Max3-Kernel | drivers/usb/host/ehci-msm72k.c | 1019 | 20264 | /* ehci-msm.c - HSUSB Host Controller Driver Implementation
*
* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* Partly derived from ehci-fsl.c and ehci-hcd.c
* Copyright (c) 2000-2004 by David Brownell
* Copyright (c) 2005 MontaVista Software
*
* All source code in this file is licensed under the following license except
* where indicated.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <mach/board.h>
#include <mach/rpc_hsusb.h>
#include <mach/msm_hsusb.h>
#include <mach/msm_hsusb_hw.h>
#include <mach/msm_otg.h>
#include <mach/clk.h>
#include <linux/wakelock.h>
#include <linux/pm_runtime.h>
#include <mach/msm72k_otg.h>
#define MSM_USB_BASE (hcd->regs)
struct msmusb_hcd {
struct ehci_hcd ehci;
struct clk *alt_core_clk;
struct clk *iface_clk;
unsigned in_lpm;
struct work_struct lpm_exit_work;
spinlock_t lock;
struct wake_lock wlock;
unsigned int clk_enabled;
struct msm_usb_host_platform_data *pdata;
unsigned running;
struct usb_phy *xceiv;
struct work_struct otg_work;
unsigned flags;
struct msm_otg_ops otg_ops;
};
static inline struct msmusb_hcd *hcd_to_mhcd(struct usb_hcd *hcd)
{
return (struct msmusb_hcd *) (hcd->hcd_priv);
}
static inline struct usb_hcd *mhcd_to_hcd(struct msmusb_hcd *mhcd)
{
return container_of((void *) mhcd, struct usb_hcd, hcd_priv);
}
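/* Note, not part of the original driver: these casts rely on
 * usb_create_hcd() allocating hcd_priv_size (set below to
 * sizeof(struct msmusb_hcd)) bytes directly behind struct usb_hcd, so
 * hcd->hcd_priv and the msmusb_hcd private data refer to the same memory.
 */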
static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
/* if otg driver is available, it would take
* care of voting for appropriate pclk source
*/
if (mhcd->xceiv)
return;
if (vote)
clk_prepare_enable(pdata->ebi1_clk);
else
clk_disable_unprepare(pdata->ebi1_clk);
}
static void msm_xusb_enable_clks(struct msmusb_hcd *mhcd)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
if (mhcd->clk_enabled)
return;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
/* OTG driver takes care of clock management */
break;
case USB_PHY_SERIAL_PMIC:
clk_prepare_enable(mhcd->alt_core_clk);
clk_prepare_enable(mhcd->iface_clk);
break;
default:
pr_err("%s: undefined phy type ( %X )\n", __func__,
pdata->phy_info);
return;
}
mhcd->clk_enabled = 1;
}
static void msm_xusb_disable_clks(struct msmusb_hcd *mhcd)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
if (!mhcd->clk_enabled)
return;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
/* OTG driver takes care of clock management */
break;
case USB_PHY_SERIAL_PMIC:
clk_disable_unprepare(mhcd->alt_core_clk);
clk_disable_unprepare(mhcd->iface_clk);
break;
default:
pr_err("%s: undefined phy type ( %X )\n", __func__,
pdata->phy_info);
return;
}
mhcd->clk_enabled = 0;
}
static int usb_wakeup_phy(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
int ret = -ENODEV;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
break;
case USB_PHY_SERIAL_PMIC:
ret = msm_fsusb_resume_phy();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return ret;
}
#ifdef CONFIG_PM
static int usb_suspend_phy(struct usb_hcd *hcd)
{
int ret = 0;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
break;
case USB_PHY_SERIAL_PMIC:
ret = msm_fsusb_set_remote_wakeup();
ret = msm_fsusb_suspend_phy();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
ret = -ENODEV;
break;
}
return ret;
}
static int usb_lpm_enter(struct usb_hcd *hcd)
{
struct device *dev = container_of((void *)hcd, struct device,
platform_data);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
disable_irq(hcd->irq);
if (mhcd->in_lpm) {
pr_info("%s: already in lpm. nothing to do\n", __func__);
enable_irq(hcd->irq);
return 0;
}
if (HC_IS_RUNNING(hcd->state)) {
pr_info("%s: can't enter into lpm. controller is runnning\n",
__func__);
enable_irq(hcd->irq);
return -1;
}
pr_info("%s: lpm enter procedure started\n", __func__);
mhcd->in_lpm = 1;
if (usb_suspend_phy(hcd)) {
mhcd->in_lpm = 0;
enable_irq(hcd->irq);
pr_info("phy suspend failed\n");
pr_info("%s: lpm enter procedure end\n", __func__);
return -1;
}
msm_xusb_disable_clks(mhcd);
if (mhcd->xceiv && mhcd->xceiv->set_suspend)
mhcd->xceiv->set_suspend(mhcd->xceiv, 1);
if (device_may_wakeup(dev))
enable_irq_wake(hcd->irq);
enable_irq(hcd->irq);
pr_info("%s: lpm enter procedure end\n", __func__);
return 0;
}
#endif
void usb_lpm_exit_w(struct work_struct *work)
{
struct msmusb_hcd *mhcd = container_of((void *) work,
struct msmusb_hcd, lpm_exit_work);
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct device *dev = container_of((void *)hcd, struct device,
platform_data);
msm_xusb_enable_clks(mhcd);
if (usb_wakeup_phy(hcd)) {
pr_err("fatal error: cannot bring phy out of lpm\n");
return;
}
/* If resume signalling finishes before lpm exit, PCD is not set in
* USBSTS register. Drive resume signal to the downstream device now
* so that EHCI can process the upcoming port change interrupt.*/
writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
if (mhcd->xceiv && mhcd->xceiv->set_suspend)
mhcd->xceiv->set_suspend(mhcd->xceiv, 0);
if (device_may_wakeup(dev))
disable_irq_wake(hcd->irq);
enable_irq(hcd->irq);
}
static void usb_lpm_exit(struct usb_hcd *hcd)
{
unsigned long flags;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
spin_lock_irqsave(&mhcd->lock, flags);
if (!mhcd->in_lpm) {
spin_unlock_irqrestore(&mhcd->lock, flags);
return;
}
mhcd->in_lpm = 0;
disable_irq_nosync(hcd->irq);
schedule_work(&mhcd->lpm_exit_work);
spin_unlock_irqrestore(&mhcd->lock, flags);
}
static irqreturn_t ehci_msm_irq(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, phy);
/*
* OTG scheduled a work item to bring the integrated PHY out of LPM;
* wait until that has completed.
*/
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
if (atomic_read(&otg->in_lpm))
return IRQ_HANDLED;
return ehci_irq(hcd);
}
#ifdef CONFIG_PM
static int ehci_msm_bus_suspend(struct usb_hcd *hcd)
{
int ret;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct device *dev = hcd->self.controller;
ret = ehci_bus_suspend(hcd);
if (ret) {
pr_err("ehci_bus suspend faield\n");
return ret;
}
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
ret = usb_phy_set_suspend(mhcd->xceiv, 1);
else
ret = usb_lpm_enter(hcd);
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
wake_unlock(&mhcd->wlock);
return ret;
}
static int ehci_msm_bus_resume(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct device *dev = hcd->self.controller;
wake_lock(&mhcd->wlock);
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) {
usb_phy_set_suspend(mhcd->xceiv, 0);
} else { /* PMIC serial phy */
usb_lpm_exit(hcd);
if (cancel_work_sync(&(mhcd->lpm_exit_work)))
usb_lpm_exit_w(&mhcd->lpm_exit_work);
}
return ehci_bus_resume(hcd);
}
#else
#define ehci_msm_bus_suspend NULL
#define ehci_msm_bus_resume NULL
#endif /* CONFIG_PM */
static int ehci_msm_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->caps = USB_CAPLENGTH;
ehci->regs = USB_CAPLENGTH +
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
/* cache the data to minimize the chip reads*/
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
retval = ehci_init(hcd);
if (retval)
return retval;
hcd->has_tt = 1;
ehci->sbrn = HCD_USB2;
retval = ehci_reset(ehci);
/* SW workaround for USB stability issues*/
writel(0x0, USB_AHB_MODE);
writel(0x0, USB_AHB_BURST);
return retval;
}
#define PTS_VAL(x) (PHY_TYPE(x) == USB_PHY_SERIAL_PMIC) ? PORTSC_PTS_SERIAL : \
PORTSC_PTS_ULPI
static int ehci_msm_run(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
int retval = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
u32 __iomem *reg_ptr;
u32 hcc_params;
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* set hostmode */
reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
ehci_writel(ehci, (USBMODE_VBUS | USBMODE_SDIS), reg_ptr);
/* port configuration - phy, port speed, port power, port enable */
while (port--)
ehci_writel(ehci, (PTS_VAL(pdata->phy_info) | PORT_POWER |
PORT_PE), &ehci->regs->port_status[port]);
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params))
ehci_writel(ehci, 0, &ehci->regs->segment);
ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
hcd->state = HC_STATE_RUNNING;
/*Enable appropriate Interrupts*/
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
return retval;
}
static struct hc_driver msm_hc_driver = {
.description = hcd_name,
.product_desc = "Qualcomm On-Chip EHCI Host Controller",
.hcd_priv_size = sizeof(struct msmusb_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_msm_irq,
.flags = HCD_USB2,
.reset = ehci_msm_reset,
.start = ehci_msm_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_msm_bus_suspend,
.bus_resume = ehci_msm_bus_resume,
.relinquish_port = ehci_relinquish_port,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static void msm_hsusb_request_host(void *handle, int request)
{
struct msmusb_hcd *mhcd = handle;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, phy);
#ifdef CONFIG_USB_OTG
struct usb_device *udev = hcd->self.root_hub;
#endif
struct device *dev = hcd->self.controller;
switch (request) {
#ifdef CONFIG_USB_OTG
case REQUEST_HNP_SUSPEND:
/* disable Root hub auto suspend. As the hardware is configured
* for peripheral mode, mark the hardware as unavailable.
*/
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
pm_runtime_disable(&udev->dev);
/* Mark root hub as disconnected. This would
* protect suspend/resume via sysfs.
*/
udev->state = USB_STATE_NOTATTACHED;
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
hcd->state = HC_STATE_HALT;
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
}
break;
case REQUEST_HNP_RESUME:
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
disable_irq(hcd->irq);
ehci_msm_reset(hcd);
ehci_msm_run(hcd);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
pm_runtime_enable(&udev->dev);
udev->state = USB_STATE_CONFIGURED;
enable_irq(hcd->irq);
}
break;
#endif
case REQUEST_RESUME:
usb_hcd_resume_root_hub(hcd);
break;
case REQUEST_START:
if (mhcd->running)
break;
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
wake_lock(&mhcd->wlock);
msm_xusb_pm_qos_update(mhcd, 1);
msm_xusb_enable_clks(mhcd);
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
if (otg->set_clk)
otg->set_clk(mhcd->xceiv, 1);
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 1);
if (pdata->config_gpio)
pdata->config_gpio(1);
usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
mhcd->running = 1;
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
if (otg->set_clk)
otg->set_clk(mhcd->xceiv, 0);
break;
case REQUEST_STOP:
if (!mhcd->running)
break;
mhcd->running = 0;
/* come out of lpm before deregistration */
if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
usb_lpm_exit(hcd);
if (cancel_work_sync(&(mhcd->lpm_exit_work)))
usb_lpm_exit_w(&mhcd->lpm_exit_work);
}
usb_remove_hcd(hcd);
if (pdata->config_gpio)
pdata->config_gpio(0);
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 0);
msm_xusb_disable_clks(mhcd);
wake_lock_timeout(&mhcd->wlock, HZ/2);
msm_xusb_pm_qos_update(mhcd, 0);
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
break;
}
}
static void msm_hsusb_otg_work(struct work_struct *work)
{
struct msmusb_hcd *mhcd;
mhcd = container_of(work, struct msmusb_hcd, otg_work);
msm_hsusb_request_host((void *)mhcd, mhcd->flags);
}
static void msm_hsusb_start_host(struct usb_bus *bus, int start)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
mhcd->flags = start;
if (in_interrupt())
schedule_work(&mhcd->otg_work);
else
msm_hsusb_request_host((void *)mhcd, mhcd->flags);
}
static int msm_xusb_init_phy(struct msmusb_hcd *mhcd)
{
int ret = -ENODEV;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
ret = 0;
case USB_PHY_SERIAL_PMIC:
msm_xusb_enable_clks(mhcd);
writel(0, USB_USBINTR);
ret = msm_fsusb_rpc_init(&mhcd->otg_ops);
if (!ret)
msm_fsusb_init_phy();
msm_xusb_disable_clks(mhcd);
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return ret;
}
static int msm_xusb_rpc_close(struct msmusb_hcd *mhcd)
{
int retval = -ENODEV;
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
if (!mhcd->xceiv)
retval = msm_hsusb_rpc_close();
break;
case USB_PHY_SERIAL_PMIC:
retval = msm_fsusb_reset_phy();
msm_fsusb_rpc_deinit();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return retval;
}
static int msm_xusb_init_host(struct platform_device *pdev,
struct msmusb_hcd *mhcd)
{
int ret = 0;
struct msm_otg *otg;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
msm_hsusb_rpc_connect();
if (pdata->vbus_init)
pdata->vbus_init(1);
/* VBUS might be present. Turn off vbus */
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 0);
INIT_WORK(&mhcd->otg_work, msm_hsusb_otg_work);
mhcd->xceiv = usb_get_transceiver();
if (!mhcd->xceiv)
return -ENODEV;
otg = container_of(mhcd->xceiv, struct msm_otg, phy);
hcd->regs = otg->regs;
otg->start_host = msm_hsusb_start_host;
ret = otg_set_host(mhcd->xceiv->otg, &hcd->self);
ehci->transceiver = mhcd->xceiv;
break;
case USB_PHY_SERIAL_PMIC:
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs)
return -EFAULT;
/* get usb clocks */
mhcd->alt_core_clk = clk_get(&pdev->dev, "alt_core_clk");
if (IS_ERR(mhcd->alt_core_clk)) {
iounmap(hcd->regs);
return PTR_ERR(mhcd->alt_core_clk);
}
mhcd->iface_clk = clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(mhcd->iface_clk)) {
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
return PTR_ERR(mhcd->iface_clk);
}
mhcd->otg_ops.request = msm_hsusb_request_host;
mhcd->otg_ops.handle = (void *) mhcd;
ret = msm_xusb_init_phy(mhcd);
if (ret < 0) {
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
clk_put(mhcd->iface_clk);
}
break;
default:
pr_err("phy type is bad\n");
}
return ret;
}
static int __devinit ehci_msm_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
struct msm_usb_host_platform_data *pdata;
int retval;
struct msmusb_hcd *mhcd;
hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
hcd->irq = platform_get_irq(pdev, 0);
if (hcd->irq < 0) {
usb_put_hcd(hcd);
return hcd->irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
usb_put_hcd(hcd);
return -ENODEV;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
mhcd = hcd_to_mhcd(hcd);
spin_lock_init(&mhcd->lock);
mhcd->in_lpm = 0;
mhcd->running = 0;
device_init_wakeup(&pdev->dev, 1);
pdata = pdev->dev.platform_data;
if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) {
usb_put_hcd(hcd);
return -ENODEV;
}
hcd->power_budget = pdata->power_budget;
mhcd->pdata = pdata;
INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w);
wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
pdata->ebi1_clk = clk_get(&pdev->dev, "core_clk");
if (IS_ERR(pdata->ebi1_clk))
pdata->ebi1_clk = NULL;
else
clk_set_rate(pdata->ebi1_clk, INT_MAX);
retval = msm_xusb_init_host(pdev, mhcd);
if (retval < 0) {
wake_lock_destroy(&mhcd->wlock);
usb_put_hcd(hcd);
clk_put(pdata->ebi1_clk);
}
pm_runtime_enable(&pdev->dev);
return retval;
}
static void msm_xusb_uninit_host(struct msmusb_hcd *mhcd)
{
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
if (pdata->vbus_init)
pdata->vbus_init(0);
hcd_to_ehci(hcd)->transceiver = NULL;
otg_set_host(mhcd->xceiv->otg, NULL);
usb_put_transceiver(mhcd->xceiv);
cancel_work_sync(&mhcd->otg_work);
break;
case USB_PHY_SERIAL_PMIC:
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
clk_put(mhcd->iface_clk);
msm_fsusb_reset_phy();
msm_fsusb_rpc_deinit();
break;
default:
pr_err("phy type is bad\n");
}
}
static int __exit ehci_msm_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata;
int retval = 0;
pdata = pdev->dev.platform_data;
device_init_wakeup(&pdev->dev, 0);
msm_hsusb_request_host((void *)mhcd, REQUEST_STOP);
msm_xusb_uninit_host(mhcd);
retval = msm_xusb_rpc_close(mhcd);
wake_lock_destroy(&mhcd->wlock);
usb_put_hcd(hcd);
clk_put(pdata->ebi1_clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
return retval;
}
static int ehci_msm_runtime_suspend(struct device *dev)
{
dev_dbg(dev, "pm_runtime: suspending...\n");
return 0;
}
static int ehci_msm_runtime_resume(struct device *dev)
{
dev_dbg(dev, "pm_runtime: resuming...\n");
return 0;
}
static int ehci_msm_runtime_idle(struct device *dev)
{
dev_dbg(dev, "pm_runtime: idling...\n");
return 0;
}
static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
.runtime_suspend = ehci_msm_runtime_suspend,
.runtime_resume = ehci_msm_runtime_resume,
.runtime_idle = ehci_msm_runtime_idle
};
static struct platform_driver ehci_msm_driver = {
.probe = ehci_msm_probe,
.remove = __exit_p(ehci_msm_remove),
.driver = {.name = "msm_hsusb_host",
.pm = &ehci_msm_dev_pm_ops, },
};
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8930 | drivers/usb/host/ehci-msm72k.c | 1019 | 20264 | /* ehci-msm.c - HSUSB Host Controller Driver Implementation
*
* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* Partly derived from ehci-fsl.c and ehci-hcd.c
* Copyright (c) 2000-2004 by David Brownell
* Copyright (c) 2005 MontaVista Software
*
* All source code in this file is licensed under the following license except
* where indicated.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <mach/board.h>
#include <mach/rpc_hsusb.h>
#include <mach/msm_hsusb.h>
#include <mach/msm_hsusb_hw.h>
#include <mach/msm_otg.h>
#include <mach/clk.h>
#include <linux/wakelock.h>
#include <linux/pm_runtime.h>
#include <mach/msm72k_otg.h>
#define MSM_USB_BASE (hcd->regs)
struct msmusb_hcd {
struct ehci_hcd ehci;
struct clk *alt_core_clk;
struct clk *iface_clk;
unsigned in_lpm;
struct work_struct lpm_exit_work;
spinlock_t lock;
struct wake_lock wlock;
unsigned int clk_enabled;
struct msm_usb_host_platform_data *pdata;
unsigned running;
struct usb_phy *xceiv;
struct work_struct otg_work;
unsigned flags;
struct msm_otg_ops otg_ops;
};
static inline struct msmusb_hcd *hcd_to_mhcd(struct usb_hcd *hcd)
{
return (struct msmusb_hcd *) (hcd->hcd_priv);
}
static inline struct usb_hcd *mhcd_to_hcd(struct msmusb_hcd *mhcd)
{
return container_of((void *) mhcd, struct usb_hcd, hcd_priv);
}
static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
/* if otg driver is available, it would take
* care of voting for appropriate pclk source
*/
if (mhcd->xceiv)
return;
if (vote)
clk_prepare_enable(pdata->ebi1_clk);
else
clk_disable_unprepare(pdata->ebi1_clk);
}
static void msm_xusb_enable_clks(struct msmusb_hcd *mhcd)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
if (mhcd->clk_enabled)
return;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
/* OTG driver takes care of clock management */
break;
case USB_PHY_SERIAL_PMIC:
clk_prepare_enable(mhcd->alt_core_clk);
clk_prepare_enable(mhcd->iface_clk);
break;
default:
pr_err("%s: undefined phy type ( %X )\n", __func__,
pdata->phy_info);
return;
}
mhcd->clk_enabled = 1;
}
static void msm_xusb_disable_clks(struct msmusb_hcd *mhcd)
{
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
if (!mhcd->clk_enabled)
return;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
/* OTG driver takes care of clock management */
break;
case USB_PHY_SERIAL_PMIC:
clk_disable_unprepare(mhcd->alt_core_clk);
clk_disable_unprepare(mhcd->iface_clk);
break;
default:
pr_err("%s: undefined phy type ( %X )\n", __func__,
pdata->phy_info);
return;
}
mhcd->clk_enabled = 0;
}
static int usb_wakeup_phy(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
int ret = -ENODEV;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
break;
case USB_PHY_SERIAL_PMIC:
ret = msm_fsusb_resume_phy();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return ret;
}
#ifdef CONFIG_PM
static int usb_suspend_phy(struct usb_hcd *hcd)
{
int ret = 0;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
break;
case USB_PHY_SERIAL_PMIC:
ret = msm_fsusb_set_remote_wakeup();
ret = msm_fsusb_suspend_phy();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
ret = -ENODEV;
break;
}
return ret;
}
static int usb_lpm_enter(struct usb_hcd *hcd)
{
struct device *dev = container_of((void *)hcd, struct device,
platform_data);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
disable_irq(hcd->irq);
if (mhcd->in_lpm) {
pr_info("%s: already in lpm. nothing to do\n", __func__);
enable_irq(hcd->irq);
return 0;
}
if (HC_IS_RUNNING(hcd->state)) {
pr_info("%s: can't enter into lpm. controller is runnning\n",
__func__);
enable_irq(hcd->irq);
return -1;
}
pr_info("%s: lpm enter procedure started\n", __func__);
mhcd->in_lpm = 1;
if (usb_suspend_phy(hcd)) {
mhcd->in_lpm = 0;
enable_irq(hcd->irq);
pr_info("phy suspend failed\n");
pr_info("%s: lpm enter procedure end\n", __func__);
return -1;
}
msm_xusb_disable_clks(mhcd);
if (mhcd->xceiv && mhcd->xceiv->set_suspend)
mhcd->xceiv->set_suspend(mhcd->xceiv, 1);
if (device_may_wakeup(dev))
enable_irq_wake(hcd->irq);
enable_irq(hcd->irq);
pr_info("%s: lpm enter procedure end\n", __func__);
return 0;
}
#endif
void usb_lpm_exit_w(struct work_struct *work)
{
struct msmusb_hcd *mhcd = container_of((void *) work,
struct msmusb_hcd, lpm_exit_work);
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct device *dev = container_of((void *)hcd, struct device,
platform_data);
msm_xusb_enable_clks(mhcd);
if (usb_wakeup_phy(hcd)) {
pr_err("fatal error: cannot bring phy out of lpm\n");
return;
}
/* If resume signalling finishes before lpm exit, PCD is not set in
* USBSTS register. Drive resume signal to the downstream device now
* so that EHCI can process the upcoming port change interrupt.*/
writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
if (mhcd->xceiv && mhcd->xceiv->set_suspend)
mhcd->xceiv->set_suspend(mhcd->xceiv, 0);
if (device_may_wakeup(dev))
disable_irq_wake(hcd->irq);
enable_irq(hcd->irq);
}
static void usb_lpm_exit(struct usb_hcd *hcd)
{
unsigned long flags;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
spin_lock_irqsave(&mhcd->lock, flags);
if (!mhcd->in_lpm) {
spin_unlock_irqrestore(&mhcd->lock, flags);
return;
}
mhcd->in_lpm = 0;
disable_irq_nosync(hcd->irq);
schedule_work(&mhcd->lpm_exit_work);
spin_unlock_irqrestore(&mhcd->lock, flags);
}
static irqreturn_t ehci_msm_irq(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, phy);
/*
* OTG scheduled a work item to bring the integrated PHY out of LPM;
* wait until that has completed.
*/
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
if (atomic_read(&otg->in_lpm))
return IRQ_HANDLED;
return ehci_irq(hcd);
}
#ifdef CONFIG_PM
static int ehci_msm_bus_suspend(struct usb_hcd *hcd)
{
int ret;
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct device *dev = hcd->self.controller;
ret = ehci_bus_suspend(hcd);
if (ret) {
pr_err("ehci_bus suspend faield\n");
return ret;
}
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
ret = usb_phy_set_suspend(mhcd->xceiv, 1);
else
ret = usb_lpm_enter(hcd);
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
wake_unlock(&mhcd->wlock);
return ret;
}
static int ehci_msm_bus_resume(struct usb_hcd *hcd)
{
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct device *dev = hcd->self.controller;
wake_lock(&mhcd->wlock);
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) {
usb_phy_set_suspend(mhcd->xceiv, 0);
} else { /* PMIC serial phy */
usb_lpm_exit(hcd);
if (cancel_work_sync(&(mhcd->lpm_exit_work)))
usb_lpm_exit_w(&mhcd->lpm_exit_work);
}
return ehci_bus_resume(hcd);
}
#else
#define ehci_msm_bus_suspend NULL
#define ehci_msm_bus_resume NULL
#endif /* CONFIG_PM */
static int ehci_msm_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->caps = USB_CAPLENGTH;
ehci->regs = USB_CAPLENGTH +
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
/* cache the data to minimize the chip reads*/
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
retval = ehci_init(hcd);
if (retval)
return retval;
hcd->has_tt = 1;
ehci->sbrn = HCD_USB2;
retval = ehci_reset(ehci);
/* SW workaround for USB stability issues*/
writel(0x0, USB_AHB_MODE);
writel(0x0, USB_AHB_BURST);
return retval;
}
#define PTS_VAL(x) (PHY_TYPE(x) == USB_PHY_SERIAL_PMIC) ? PORTSC_PTS_SERIAL : \
PORTSC_PTS_ULPI
static int ehci_msm_run(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
int retval = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
u32 __iomem *reg_ptr;
u32 hcc_params;
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* set hostmode */
reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
ehci_writel(ehci, (USBMODE_VBUS | USBMODE_SDIS), reg_ptr);
/* port configuration - phy, port speed, port power, port enable */
while (port--)
ehci_writel(ehci, (PTS_VAL(pdata->phy_info) | PORT_POWER |
PORT_PE), &ehci->regs->port_status[port]);
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params))
ehci_writel(ehci, 0, &ehci->regs->segment);
ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
hcd->state = HC_STATE_RUNNING;
/*Enable appropriate Interrupts*/
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
return retval;
}
static struct hc_driver msm_hc_driver = {
.description = hcd_name,
.product_desc = "Qualcomm On-Chip EHCI Host Controller",
.hcd_priv_size = sizeof(struct msmusb_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_msm_irq,
.flags = HCD_USB2,
.reset = ehci_msm_reset,
.start = ehci_msm_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_msm_bus_suspend,
.bus_resume = ehci_msm_bus_resume,
.relinquish_port = ehci_relinquish_port,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static void msm_hsusb_request_host(void *handle, int request)
{
struct msmusb_hcd *mhcd = handle;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, phy);
#ifdef CONFIG_USB_OTG
struct usb_device *udev = hcd->self.root_hub;
#endif
struct device *dev = hcd->self.controller;
switch (request) {
#ifdef CONFIG_USB_OTG
case REQUEST_HNP_SUSPEND:
/* Disable root hub auto suspend. As the hardware is configured
* for peripheral mode, mark the hardware as not available.
*/
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
pm_runtime_disable(&udev->dev);
/* Mark root hub as disconnected. This would
* protect suspend/resume via sysfs.
*/
udev->state = USB_STATE_NOTATTACHED;
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
hcd->state = HC_STATE_HALT;
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
}
break;
case REQUEST_HNP_RESUME:
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
disable_irq(hcd->irq);
ehci_msm_reset(hcd);
ehci_msm_run(hcd);
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
pm_runtime_enable(&udev->dev);
udev->state = USB_STATE_CONFIGURED;
enable_irq(hcd->irq);
}
break;
#endif
case REQUEST_RESUME:
usb_hcd_resume_root_hub(hcd);
break;
case REQUEST_START:
if (mhcd->running)
break;
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
wake_lock(&mhcd->wlock);
msm_xusb_pm_qos_update(mhcd, 1);
msm_xusb_enable_clks(mhcd);
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
if (otg->set_clk)
otg->set_clk(mhcd->xceiv, 1);
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 1);
if (pdata->config_gpio)
pdata->config_gpio(1);
usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
mhcd->running = 1;
if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
if (otg->set_clk)
otg->set_clk(mhcd->xceiv, 0);
break;
case REQUEST_STOP:
if (!mhcd->running)
break;
mhcd->running = 0;
/* come out of lpm before deregistration */
if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
usb_lpm_exit(hcd);
if (cancel_work_sync(&(mhcd->lpm_exit_work)))
usb_lpm_exit_w(&mhcd->lpm_exit_work);
}
usb_remove_hcd(hcd);
if (pdata->config_gpio)
pdata->config_gpio(0);
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 0);
msm_xusb_disable_clks(mhcd);
wake_lock_timeout(&mhcd->wlock, HZ/2);
msm_xusb_pm_qos_update(mhcd, 0);
pm_runtime_put_noidle(dev);
pm_runtime_suspend(dev);
break;
}
}
static void msm_hsusb_otg_work(struct work_struct *work)
{
struct msmusb_hcd *mhcd;
mhcd = container_of(work, struct msmusb_hcd, otg_work);
msm_hsusb_request_host((void *)mhcd, mhcd->flags);
}
static void msm_hsusb_start_host(struct usb_bus *bus, int start)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
mhcd->flags = start;
if (in_interrupt())
schedule_work(&mhcd->otg_work);
else
msm_hsusb_request_host((void *)mhcd, mhcd->flags);
}
static int msm_xusb_init_phy(struct msmusb_hcd *mhcd)
{
int ret = -ENODEV;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
ret = 0;
break;
case USB_PHY_SERIAL_PMIC:
msm_xusb_enable_clks(mhcd);
writel(0, USB_USBINTR);
ret = msm_fsusb_rpc_init(&mhcd->otg_ops);
if (!ret)
msm_fsusb_init_phy();
msm_xusb_disable_clks(mhcd);
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return ret;
}
static int msm_xusb_rpc_close(struct msmusb_hcd *mhcd)
{
int retval = -ENODEV;
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
if (!mhcd->xceiv)
retval = msm_hsusb_rpc_close();
break;
case USB_PHY_SERIAL_PMIC:
retval = msm_fsusb_reset_phy();
msm_fsusb_rpc_deinit();
break;
default:
pr_err("%s: undefined phy type ( %X ) \n", __func__,
pdata->phy_info);
}
return retval;
}
static int msm_xusb_init_host(struct platform_device *pdev,
struct msmusb_hcd *mhcd)
{
int ret = 0;
struct msm_otg *otg;
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
msm_hsusb_rpc_connect();
if (pdata->vbus_init)
pdata->vbus_init(1);
/* VBUS might be present. Turn off vbus */
if (pdata->vbus_power)
pdata->vbus_power(pdata->phy_info, 0);
INIT_WORK(&mhcd->otg_work, msm_hsusb_otg_work);
mhcd->xceiv = usb_get_transceiver();
if (!mhcd->xceiv)
return -ENODEV;
otg = container_of(mhcd->xceiv, struct msm_otg, phy);
hcd->regs = otg->regs;
otg->start_host = msm_hsusb_start_host;
ret = otg_set_host(mhcd->xceiv->otg, &hcd->self);
ehci->transceiver = mhcd->xceiv;
break;
case USB_PHY_SERIAL_PMIC:
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs)
return -EFAULT;
/* get usb clocks */
mhcd->alt_core_clk = clk_get(&pdev->dev, "alt_core_clk");
if (IS_ERR(mhcd->alt_core_clk)) {
iounmap(hcd->regs);
return PTR_ERR(mhcd->alt_core_clk);
}
mhcd->iface_clk = clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(mhcd->iface_clk)) {
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
return PTR_ERR(mhcd->iface_clk);
}
mhcd->otg_ops.request = msm_hsusb_request_host;
mhcd->otg_ops.handle = (void *) mhcd;
ret = msm_xusb_init_phy(mhcd);
if (ret < 0) {
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
clk_put(mhcd->iface_clk);
}
break;
default:
pr_err("phy type is bad\n");
}
return ret;
}
static int __devinit ehci_msm_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
struct msm_usb_host_platform_data *pdata;
int retval;
struct msmusb_hcd *mhcd;
hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
hcd->irq = platform_get_irq(pdev, 0);
if (hcd->irq < 0) {
usb_put_hcd(hcd);
return hcd->irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
usb_put_hcd(hcd);
return -ENODEV;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
mhcd = hcd_to_mhcd(hcd);
spin_lock_init(&mhcd->lock);
mhcd->in_lpm = 0;
mhcd->running = 0;
device_init_wakeup(&pdev->dev, 1);
pdata = pdev->dev.platform_data;
if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) {
usb_put_hcd(hcd);
return -ENODEV;
}
hcd->power_budget = pdata->power_budget;
mhcd->pdata = pdata;
INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w);
wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
pdata->ebi1_clk = clk_get(&pdev->dev, "core_clk");
if (IS_ERR(pdata->ebi1_clk))
pdata->ebi1_clk = NULL;
else
clk_set_rate(pdata->ebi1_clk, INT_MAX);
retval = msm_xusb_init_host(pdev, mhcd);
if (retval < 0) {
wake_lock_destroy(&mhcd->wlock);
usb_put_hcd(hcd);
clk_put(pdata->ebi1_clk);
}
pm_runtime_enable(&pdev->dev);
return retval;
}
static void msm_xusb_uninit_host(struct msmusb_hcd *mhcd)
{
struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
struct msm_usb_host_platform_data *pdata = mhcd->pdata;
switch (PHY_TYPE(pdata->phy_info)) {
case USB_PHY_INTEGRATED:
if (pdata->vbus_init)
pdata->vbus_init(0);
hcd_to_ehci(hcd)->transceiver = NULL;
otg_set_host(mhcd->xceiv->otg, NULL);
usb_put_transceiver(mhcd->xceiv);
cancel_work_sync(&mhcd->otg_work);
break;
case USB_PHY_SERIAL_PMIC:
iounmap(hcd->regs);
clk_put(mhcd->alt_core_clk);
clk_put(mhcd->iface_clk);
msm_fsusb_reset_phy();
msm_fsusb_rpc_deinit();
break;
default:
pr_err("phy type is bad\n");
}
}
static int __exit ehci_msm_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
struct msm_usb_host_platform_data *pdata;
int retval = 0;
pdata = pdev->dev.platform_data;
device_init_wakeup(&pdev->dev, 0);
msm_hsusb_request_host((void *)mhcd, REQUEST_STOP);
msm_xusb_uninit_host(mhcd);
retval = msm_xusb_rpc_close(mhcd);
wake_lock_destroy(&mhcd->wlock);
usb_put_hcd(hcd);
clk_put(pdata->ebi1_clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
return retval;
}
static int ehci_msm_runtime_suspend(struct device *dev)
{
dev_dbg(dev, "pm_runtime: suspending...\n");
return 0;
}
static int ehci_msm_runtime_resume(struct device *dev)
{
dev_dbg(dev, "pm_runtime: resuming...\n");
return 0;
}
static int ehci_msm_runtime_idle(struct device *dev)
{
dev_dbg(dev, "pm_runtime: idling...\n");
return 0;
}
static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
.runtime_suspend = ehci_msm_runtime_suspend,
.runtime_resume = ehci_msm_runtime_resume,
.runtime_idle = ehci_msm_runtime_idle
};
static struct platform_driver ehci_msm_driver = {
.probe = ehci_msm_probe,
.remove = __exit_p(ehci_msm_remove),
.driver = {.name = "msm_hsusb_host",
.pm = &ehci_msm_dev_pm_ops, },
};
| gpl-2.0 |
SlimRoms/kernel_nvidia_shieldtablet | drivers/md/dm-verity.c | 1019 | 23495 | /*
* Copyright (C) 2012 Red Hat, Inc.
*
* Author: Mikulas Patocka <mpatocka@redhat.com>
*
* Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
*
* This file is released under the GPLv2.
*
* In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
* default prefetch value. Data are read in "prefetch_cluster" chunks from the
* hash device. Setting this greatly improves performance when data and hash
* are on the same disk on different partitions on devices with poor random
* access behavior.
*/
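/*
 * Illustrative usage only (not part of the driver): the prefetch size is
 * given in bytes and can be tuned at runtime, e.g.
 *   echo 1048576 > /sys/module/dm_verity/parameters/prefetch_cluster
 * The value is converted from bytes to block-sized units in
 * verity_prefetch_io() below.
 */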
#include "dm-bufio.h"
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <crypto/hash.h>
#define DM_MSG_PREFIX "verity"
#define DM_VERITY_IO_VEC_INLINE 16
#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
struct dm_verity {
struct dm_dev *data_dev;
struct dm_dev *hash_dev;
struct dm_target *ti;
struct dm_bufio_client *bufio;
char *alg_name;
struct crypto_shash *tfm;
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
unsigned salt_size;
sector_t data_start; /* data offset in 512-byte sectors */
sector_t hash_start; /* hash start in blocks */
sector_t data_blocks; /* the number of data blocks */
sector_t hash_blocks; /* the number of hash blocks */
unsigned char data_dev_block_bits; /* log2(data blocksize) */
unsigned char hash_dev_block_bits; /* log2(hash blocksize) */
unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
unsigned char levels; /* the number of tree levels */
unsigned char version;
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
mempool_t *vec_mempool; /* mempool of bio vector */
struct workqueue_struct *verify_wq;
/* starting blocks for each tree level. 0 is the lowest level. */
sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
};
struct dm_verity_io {
struct dm_verity *v;
/* original values of bio->bi_end_io and bio->bi_private */
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
sector_t block;
unsigned n_blocks;
/* saved bio vector */
struct bio_vec *io_vec;
unsigned io_vec_size;
struct work_struct work;
/* A space for short vectors; longer vectors are allocated separately. */
struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
/*
* Three variably-size fields follow this struct:
*
* u8 hash_desc[v->shash_descsize];
* u8 real_digest[v->digest_size];
* u8 want_digest[v->digest_size];
*
* To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
*/
};
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
sector_t block;
unsigned n_blocks;
};
static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
{
return (struct shash_desc *)(io + 1);
}
static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
{
return (u8 *)(io + 1) + v->shash_descsize;
}
static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
{
return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
}
/*
* Auxiliary structure appended to each dm-bufio buffer. If the value
* hash_verified is nonzero, hash of the block has been verified.
*
* The variable hash_verified is set to 0 when allocating the buffer, then
* it can be changed to 1 and it is never reset to 0 again.
*
* There is no lock around this value, a race condition can at worst cause
* that multiple processes verify the hash of the same buffer simultaneously
* and write 1 to hash_verified simultaneously.
* This condition is harmless, so we don't need locking.
*/
struct buffer_aux {
int hash_verified;
};
/*
* Initialize struct buffer_aux for a freshly created buffer.
*/
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
aux->hash_verified = 0;
}
/*
* Translate input sector number to the sector number on the target device.
*/
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
return v->data_start + dm_target_offset(v->ti, bi_sector);
}
/*
* Return hash position of a specified block at a specified tree level
* (0 is the lowest level).
* The lowest "hash_per_block_bits"-bits of the result denote hash position
* inside a hash block. The remaining bits denote location of the hash block.
*/
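/*
 * Worked example (hypothetical geometry): with 4096-byte hash blocks and
 * 32-byte digests, hash_per_block_bits = fls(4096 / 32) - 1 = 7. For data
 * block 1000 at level 0 the position is 1000, so its digest lives in hash
 * block hash_level_block[0] + (1000 >> 7) = hash_level_block[0] + 7, at
 * index 1000 & 127 = 104 within that block.
 */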
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
int level)
{
return block >> (level * v->hash_per_block_bits);
}
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
sector_t *hash_block, unsigned *offset)
{
sector_t position = verity_position_at_level(v, block, level);
unsigned idx;
*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
if (!offset)
return;
idx = position & ((1 << v->hash_per_block_bits) - 1);
if (!v->version)
*offset = idx * v->digest_size;
else
*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
/*
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
* On successful return, io_want_digest(v, io) contains the hash value for
* a lower tree level or for the data block (if we're at the lowest level).
*
* If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
* If "skip_unverified" is false, unverified buffer is hashed and verified
* against current value of io_want_digest(v, io).
*/
static int verity_verify_level(struct dm_verity_io *io, sector_t block,
int level, bool skip_unverified)
{
struct dm_verity *v = io->v;
struct dm_buffer *buf;
struct buffer_aux *aux;
u8 *data;
int r;
sector_t hash_block;
unsigned offset;
verity_hash_at_level(v, block, level, &hash_block, &offset);
data = dm_bufio_read(v->bufio, hash_block, &buf);
if (unlikely(IS_ERR(data)))
return PTR_ERR(data);
aux = dm_bufio_get_aux_data(buf);
if (!aux->hash_verified) {
struct shash_desc *desc;
u8 *result;
if (skip_unverified) {
r = 1;
goto release_ret_r;
}
desc = io_hash_desc(v, io);
desc->tfm = v->tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
r = crypto_shash_init(desc);
if (r < 0) {
DMERR("crypto_shash_init failed: %d", r);
goto release_ret_r;
}
if (likely(v->version >= 1)) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
goto release_ret_r;
}
}
r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
goto release_ret_r;
}
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
goto release_ret_r;
}
}
result = io_real_digest(v, io);
r = crypto_shash_final(desc, result);
if (r < 0) {
DMERR("crypto_shash_final failed: %d", r);
goto release_ret_r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
DMERR_LIMIT("metadata block %llu is corrupted",
(unsigned long long)hash_block);
v->hash_failed = 1;
r = -EIO;
goto release_ret_r;
} else
aux->hash_verified = 1;
}
data += offset;
memcpy(io_want_digest(v, io), data, v->digest_size);
dm_bufio_release(buf);
return 0;
release_ret_r:
dm_bufio_release(buf);
return r;
}
/*
* Verify one "dm_verity_io" structure.
*/
static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
unsigned b;
int i;
unsigned vector = 0, offset = 0;
for (b = 0; b < io->n_blocks; b++) {
struct shash_desc *desc;
u8 *result;
int r;
unsigned todo;
if (likely(v->levels)) {
/*
* First, we try to get the requested hash for
* the current block. If the hash block itself is
* verified, zero is returned. If it isn't, this
* function returns 1 and we fall back to whole
* chain verification.
*/
int r = verity_verify_level(io, io->block + b, 0, true);
if (likely(!r))
goto test_block_hash;
if (r < 0)
return r;
}
memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);
for (i = v->levels - 1; i >= 0; i--) {
int r = verity_verify_level(io, io->block + b, i, false);
if (unlikely(r))
return r;
}
test_block_hash:
desc = io_hash_desc(v, io);
desc->tfm = v->tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
r = crypto_shash_init(desc);
if (r < 0) {
DMERR("crypto_shash_init failed: %d", r);
return r;
}
if (likely(v->version >= 1)) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
}
todo = 1 << v->data_dev_block_bits;
do {
struct bio_vec *bv;
u8 *page;
unsigned len;
BUG_ON(vector >= io->io_vec_size);
bv = &io->io_vec[vector];
page = kmap_atomic(bv->bv_page);
len = bv->bv_len - offset;
if (likely(len >= todo))
len = todo;
r = crypto_shash_update(desc,
page + bv->bv_offset + offset, len);
kunmap_atomic(page);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
offset += len;
if (likely(offset == bv->bv_len)) {
offset = 0;
vector++;
}
todo -= len;
} while (todo);
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
if (r < 0) {
DMERR("crypto_shash_update failed: %d", r);
return r;
}
}
result = io_real_digest(v, io);
r = crypto_shash_final(desc, result);
if (r < 0) {
DMERR("crypto_shash_final failed: %d", r);
return r;
}
if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
DMERR_LIMIT("data block %llu is corrupted",
(unsigned long long)(io->block + b));
v->hash_failed = 1;
return -EIO;
}
}
BUG_ON(vector != io->io_vec_size);
BUG_ON(offset);
return 0;
}
/*
* End one "io" structure with a given error.
*/
static void verity_finish_io(struct dm_verity_io *io, int error)
{
struct dm_verity *v = io->v;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
if (io->io_vec != io->io_vec_inline)
mempool_free(io->io_vec, v->vec_mempool);
bio_endio(bio, error);
}
static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
verity_finish_io(io, verity_verify_io(io));
}
static void verity_end_io(struct bio *bio, int error)
{
struct dm_verity_io *io = bio->bi_private;
if (error) {
verity_finish_io(io, error);
return;
}
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
}
/*
* Prefetch buffers for the specified io.
* The root buffer is not prefetched, it is assumed that it will be cached
* all the time.
*/
static void verity_prefetch_io(struct work_struct *work)
{
struct dm_verity_prefetch_work *pw =
container_of(work, struct dm_verity_prefetch_work, work);
struct dm_verity *v = pw->v;
int i;
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
goto no_prefetch_cluster;
if (unlikely(cluster & (cluster - 1)))
cluster = 1 << (fls(cluster) - 1);
hash_block_start &= ~(sector_t)(cluster - 1);
hash_block_end |= cluster - 1;
if (unlikely(hash_block_end >= v->hash_blocks))
hash_block_end = v->hash_blocks - 1;
}
no_prefetch_cluster:
dm_bufio_prefetch(v->bufio, hash_block_start,
hash_block_end - hash_block_start + 1);
}
kfree(pw);
}
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
struct dm_verity_prefetch_work *pw;
pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (!pw)
return;
INIT_WORK(&pw->work, verity_prefetch_io);
pw->v = v;
pw->block = io->block;
pw->n_blocks = io->n_blocks;
queue_work(v->verify_wq, &pw->work);
}
/*
* Bio map function. It allocates dm_verity_io structure and bio vector and
* fills them. Then it issues prefetches and the I/O.
*/
static int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev;
bio->bi_sector = verity_map_sector(v, bio->bi_sector);
if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return -EIO;
}
if (bio_end_sector(bio) >>
(v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
DMERR_LIMIT("io out of range");
return -EIO;
}
if (bio_data_dir(bio) == WRITE)
return -EIO;
io = dm_per_bio_data(bio, ti->per_bio_data_size);
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
bio->bi_end_io = verity_end_io;
bio->bi_private = io;
io->io_vec_size = bio_segments(bio);
if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
io->io_vec = io->io_vec_inline;
else
io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
memcpy(io->io_vec, bio_iovec(bio),
io->io_vec_size * sizeof(struct bio_vec));
verity_submit_prefetch(v, io);
generic_make_request(bio);
return DM_MAPIO_SUBMITTED;
}
/*
* Status: V (valid) or C (corruption found)
*/
static void verity_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
unsigned sz = 0;
unsigned x;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%c", v->hash_failed ? 'C' : 'V');
break;
case STATUSTYPE_TABLE:
DMEMIT("%u %s %s %u %u %llu %llu %s ",
v->version,
v->data_dev->name,
v->hash_dev->name,
1 << v->data_dev_block_bits,
1 << v->hash_dev_block_bits,
(unsigned long long)v->data_blocks,
(unsigned long long)v->hash_start,
v->alg_name
);
for (x = 0; x < v->digest_size; x++)
DMEMIT("%02x", v->root_digest[x]);
DMEMIT(" ");
if (!v->salt_size)
DMEMIT("-");
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
break;
}
}
static int verity_ioctl(struct dm_target *ti, unsigned cmd,
unsigned long arg)
{
struct dm_verity *v = ti->private;
int r = 0;
if (v->data_start ||
ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
r = scsi_verify_blk_ioctl(NULL, cmd);
return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
cmd, arg);
}
static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size)
{
struct dm_verity *v = ti->private;
struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
if (!q->merge_bvec_fn)
return max_size;
bvm->bi_bdev = v->data_dev->bdev;
bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int verity_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dm_verity *v = ti->private;
return fn(ti, v->data_dev, v->data_start, ti->len, data);
}
static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dm_verity *v = ti->private;
if (limits->logical_block_size < 1 << v->data_dev_block_bits)
limits->logical_block_size = 1 << v->data_dev_block_bits;
if (limits->physical_block_size < 1 << v->data_dev_block_bits)
limits->physical_block_size = 1 << v->data_dev_block_bits;
blk_limits_io_min(limits, limits->logical_block_size);
}
static void verity_dtr(struct dm_target *ti)
{
struct dm_verity *v = ti->private;
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
if (v->vec_mempool)
mempool_destroy(v->vec_mempool);
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
kfree(v->salt);
kfree(v->root_digest);
if (v->tfm)
crypto_free_shash(v->tfm);
kfree(v->alg_name);
if (v->hash_dev)
dm_put_device(ti, v->hash_dev);
if (v->data_dev)
dm_put_device(ti, v->data_dev);
kfree(v);
}
/*
* Target parameters:
* <version> The current format is version 1.
* Vsn 0 is compatible with original Chromium OS releases.
* <data device>
* <hash device>
* <data block size>
* <hash block size>
* <the number of data blocks>
* <hash start block>
* <algorithm>
* <digest>
* <salt> Hex string or "-" if no salt.
*/
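/*
 * Illustrative example (hypothetical devices and values): a matching
 * dmsetup table line for a version-1 target could look like
 *   0 2097152 verity 1 /dev/sdb1 /dev/sdb2 4096 4096 262144 1 sha256 \
 *       <64-hex-digit root digest> <salt or "-">
 * i.e. 262144 data blocks of 4096 bytes (2097152 sectors) with the hash
 * tree starting at hash block 1.
 */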
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
struct dm_verity *v;
unsigned num;
unsigned long long num_ll;
int r;
int i;
sector_t hash_position;
char dummy;
v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
if (!v) {
ti->error = "Cannot allocate verity structure";
return -ENOMEM;
}
ti->private = v;
v->ti = ti;
if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
ti->error = "Device must be readonly";
r = -EINVAL;
goto bad;
}
if (argc != 10) {
ti->error = "Invalid argument count: exactly 10 arguments required";
r = -EINVAL;
goto bad;
}
if (sscanf(argv[0], "%d%c", &num, &dummy) != 1 ||
num < 0 || num > 1) {
ti->error = "Invalid version";
r = -EINVAL;
goto bad;
}
v->version = num;
r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
if (r) {
ti->error = "Data device lookup failed";
goto bad;
}
r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
if (r) {
ti->error = "Data device lookup failed";
goto bad;
}
if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
!num || (num & (num - 1)) ||
num < bdev_logical_block_size(v->data_dev->bdev) ||
num > PAGE_SIZE) {
ti->error = "Invalid data device block size";
r = -EINVAL;
goto bad;
}
v->data_dev_block_bits = ffs(num) - 1;
if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
!num || (num & (num - 1)) ||
num < bdev_logical_block_size(v->hash_dev->bdev) ||
num > INT_MAX) {
ti->error = "Invalid hash device block size";
r = -EINVAL;
goto bad;
}
v->hash_dev_block_bits = ffs(num) - 1;
if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
(sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
>> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid data blocks";
r = -EINVAL;
goto bad;
}
v->data_blocks = num_ll;
if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
ti->error = "Data device is too small";
r = -EINVAL;
goto bad;
}
if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
(sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
>> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
ti->error = "Invalid hash start";
r = -EINVAL;
goto bad;
}
v->hash_start = num_ll;
v->alg_name = kstrdup(argv[7], GFP_KERNEL);
if (!v->alg_name) {
ti->error = "Cannot allocate algorithm name";
r = -ENOMEM;
goto bad;
}
v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
v->tfm = NULL;
goto bad;
}
v->digest_size = crypto_shash_digestsize(v->tfm);
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
r = -EINVAL;
goto bad;
}
v->shash_descsize =
sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->root_digest) {
ti->error = "Cannot allocate root digest";
r = -ENOMEM;
goto bad;
}
if (strlen(argv[8]) != v->digest_size * 2 ||
hex2bin(v->root_digest, argv[8], v->digest_size)) {
ti->error = "Invalid root digest";
r = -EINVAL;
goto bad;
}
if (strcmp(argv[9], "-")) {
v->salt_size = strlen(argv[9]) / 2;
v->salt = kmalloc(v->salt_size, GFP_KERNEL);
if (!v->salt) {
ti->error = "Cannot allocate salt";
r = -ENOMEM;
goto bad;
}
if (strlen(argv[9]) != v->salt_size * 2 ||
hex2bin(v->salt, argv[9], v->salt_size)) {
ti->error = "Invalid salt";
r = -EINVAL;
goto bad;
}
}
v->hash_per_block_bits =
fls((1 << v->hash_dev_block_bits) / v->digest_size) - 1;
v->levels = 0;
if (v->data_blocks)
while (v->hash_per_block_bits * v->levels < 64 &&
(unsigned long long)(v->data_blocks - 1) >>
(v->hash_per_block_bits * v->levels))
v->levels++;
if (v->levels > DM_VERITY_MAX_LEVELS) {
ti->error = "Too many tree levels";
r = -E2BIG;
goto bad;
}
hash_position = v->hash_start;
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
if (hash_position + s < hash_position) {
ti->error = "Hash device offset overflow";
r = -E2BIG;
goto bad;
}
hash_position += s;
}
v->hash_blocks = hash_position;
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
v->bufio = NULL;
goto bad;
}
if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
ti->error = "Hash device is too small";
r = -E2BIG;
goto bad;
}
ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
BIO_MAX_PAGES * sizeof(struct bio_vec));
if (!v->vec_mempool) {
ti->error = "Cannot allocate vector mempool";
r = -ENOMEM;
goto bad;
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
goto bad;
}
return 0;
bad:
verity_dtr(ti);
return r;
}
static struct target_type verity_target = {
.name = "verity",
.version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
.map = verity_map,
.status = verity_status,
.ioctl = verity_ioctl,
.merge = verity_merge,
.iterate_devices = verity_iterate_devices,
.io_hints = verity_io_hints,
};
static int __init dm_verity_init(void)
{
int r;
r = dm_register_target(&verity_target);
if (r < 0)
DMERR("register failed %d", r);
return r;
}
static void __exit dm_verity_exit(void)
{
dm_unregister_target(&verity_target);
}
module_init(dm_verity_init);
module_exit(dm_verity_exit);
MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DirtyUnicorns/android_kernel_lge_gee | fs/ext3/dir.c | 1019 | 15730 | /*
* linux/fs/ext3/dir.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/dir.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext3 directory handling functions
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* Hash Tree Directory indexing (c) 2001 Daniel Phillips
*
*/
#include <linux/compat.h>
#include "ext3.h"
static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir);
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
(filetype >= EXT3_FT_MAX))
return DT_UNKNOWN;
return (ext3_filetype_table[filetype]);
}
/**
* Check if the given dir-inode refers to an htree-indexed directory
* (or a directory which could potentially get converted to use htree
* indexing).
*
* Return 1 if it is a dx dir, 0 if not
*/
static int is_dx_dir(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
EXT3_FEATURE_COMPAT_DIR_INDEX) &&
((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
((inode->i_size >> sb->s_blocksize_bits) == 1)))
return 1;
return 0;
}
int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
struct buffer_head * bh,
unsigned long offset)
{
const char * error_msg = NULL;
const int rlen = ext3_rec_len_from_disk(de->rec_len);
if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
error_msg = "directory entry across blocks";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
if (unlikely(error_msg != NULL))
ext3_error (dir->i_sb, function,
"bad entry in directory #%lu: %s - "
"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
dir->i_ino, error_msg, offset,
(unsigned long) le32_to_cpu(de->inode),
rlen, de->name_len);
return error_msg == NULL ? 1 : 0;
}
static int ext3_readdir(struct file * filp,
void * dirent, filldir_t filldir)
{
int error = 0;
unsigned long offset;
int i, stored;
struct ext3_dir_entry_2 *de;
int err;
struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
if (is_dx_dir(inode)) {
err = ext3_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
ret = err;
goto out;
}
/*
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
EXT3_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
}
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
while (!error && !stored && filp->f_pos < inode->i_size) {
unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
struct buffer_head map_bh;
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
if (err > 0) {
pgoff_t index = map_bh.b_blocknr >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
if (!ra_has_index(&filp->f_ra, index))
page_cache_sync_readahead(
sb->s_bdev->bd_inode->i_mapping,
&filp->f_ra, filp,
index, 1);
filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext3_bread(NULL, inode, blk, 0, &err);
}
/*
* We ignore I/O errors on directories so users have a chance
* of recovering data when there's a bad sector
*/
if (!bh) {
if (!dir_has_error) {
ext3_error(sb, __func__, "directory #%lu "
"contains a hole at offset %lld",
inode->i_ino, filp->f_pos);
dir_has_error = 1;
}
/* corrupt size? Maybe no more blocks to read */
if (filp->f_pos > inode->i_blocks << 9)
break;
filp->f_pos += sb->s_blocksize - offset;
continue;
}
revalidate:
/* If the dir block has changed since the last call to
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext3_dir_entry_2 *)
(bh->b_data + i);
/* It's too expensive to do a full
* dirent test each time round this
* loop, but we do have to test at
* least that it is non-zero. A
* failure will be detected in the
* dirent test below. */
if (ext3_rec_len_from_disk(de->rec_len) <
EXT3_DIR_REC_LEN(1))
break;
i += ext3_rec_len_from_disk(de->rec_len);
}
offset = i;
filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
| offset;
filp->f_version = inode->i_version;
}
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
bh, offset)) {
/* On error, skip the f_pos to the
next block. */
filp->f_pos = (filp->f_pos |
(sb->s_blocksize - 1)) + 1;
brelse (bh);
ret = stored;
goto out;
}
offset += ext3_rec_len_from_disk(de->rec_len);
if (le32_to_cpu(de->inode)) {
/* We might block in the next section
* if the data destination is
* currently swapped out. So, use a
* version stamp to detect whether or
* not the directory has been modified
* during the copy operation.
*/
u64 version = filp->f_version;
error = filldir(dirent, de->name,
de->name_len,
filp->f_pos,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type));
if (error)
break;
if (version != filp->f_version)
goto revalidate;
stored ++;
}
filp->f_pos += ext3_rec_len_from_disk(de->rec_len);
}
offset = 0;
brelse (bh);
}
out:
return ret;
}
static inline int is_32bit_api(void)
{
#ifdef CONFIG_COMPAT
return is_compat_task();
#else
return (BITS_PER_LONG == 32);
#endif
}
/*
* These functions convert from the major/minor hash to an f_pos
* value for dx directories
*
* Upper layer (for example NFS) should specify FMODE_32BITHASH or
* FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
* directly on both 32-bit and 64-bit nodes, under such case, neither
* FMODE_32BITHASH nor FMODE_64BITHASH is specified.
*/
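/*
 * For example (illustrative values): with major hash 0x8000000c and minor
 * hash 0x00000005, a 32-bit reader gets pos = 0x40000006 while a 64-bit
 * reader gets pos = 0x4000000600000005; pos2maj_hash()/pos2min_hash()
 * invert this mapping (the lowest bit of the major hash is not kept).
 */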
static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return major >> 1;
else
return ((__u64)(major >> 1) << 32) | (__u64)minor;
}
static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return (pos << 1) & 0xffffffff;
else
return ((pos >> 32) << 1) & 0xffffffff;
}
static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return 0;
else
return pos & 0xffffffff;
}
/*
* Return 32- or 64-bit end-of-file for dx directories
*/
static inline loff_t ext3_get_htree_eof(struct file *filp)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return EXT3_HTREE_EOF_32BIT;
else
return EXT3_HTREE_EOF_64BIT;
}
/*
* ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
* non-htree and htree directories, where the "offset" is in terms
* of the filename hash value instead of the byte offset.
*
* Because we may return a 64-bit hash that is well beyond s_maxbytes,
* we need to pass the max hash as the maximum allowable offset in
* the htree directory case.
*
* NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
* will be invalid once the directory has been converted into a dx directory
*/
loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
if (likely(dx_dir))
return generic_file_llseek_size(file, offset, origin,
ext3_get_htree_eof(file));
else
return generic_file_llseek(file, offset, origin);
}
/*
* This structure holds the nodes of the red-black tree used to store
* the directory entry in hash order.
*/
struct fname {
__u32 hash;
__u32 minor_hash;
struct rb_node rb_hash;
struct fname *next;
__u32 inode;
__u8 name_len;
__u8 file_type;
char name[0];
};
/*
* This function implements a non-recursive way of freeing all of the
* nodes in the red-black tree.
*/
static void free_rb_tree_fname(struct rb_root *root)
{
struct rb_node *n = root->rb_node;
struct rb_node *parent;
struct fname *fname;
while (n) {
/* Do the node's children first */
if (n->rb_left) {
n = n->rb_left;
continue;
}
if (n->rb_right) {
n = n->rb_right;
continue;
}
/*
* The node has no children; free it, and then zero
* out parent's link to it. Finally go to the
* beginning of the loop and try to free the parent
* node.
*/
parent = rb_parent(n);
fname = rb_entry(n, struct fname, rb_hash);
while (fname) {
struct fname * old = fname;
fname = fname->next;
kfree (old);
}
if (!parent)
*root = RB_ROOT;
else if (parent->rb_left == n)
parent->rb_left = NULL;
else if (parent->rb_right == n)
parent->rb_right = NULL;
n = parent;
}
}
static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
loff_t pos)
{
struct dir_private_info *p;
p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
if (!p)
return NULL;
p->curr_hash = pos2maj_hash(filp, pos);
p->curr_minor_hash = pos2min_hash(filp, pos);
return p;
}
void ext3_htree_free_dir_info(struct dir_private_info *p)
{
free_rb_tree_fname(&p->root);
kfree(p);
}
/*
* Given a directory entry, enter it into the fname rb tree.
*/
int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext3_dir_entry_2 *dirent)
{
struct rb_node **p, *parent = NULL;
struct fname * fname, *new_fn;
struct dir_private_info *info;
int len;
info = (struct dir_private_info *) dir_file->private_data;
p = &info->root.rb_node;
/* Create and allocate the fname structure */
len = sizeof(struct fname) + dirent->name_len + 1;
new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
new_fn->minor_hash = minor_hash;
new_fn->inode = le32_to_cpu(dirent->inode);
new_fn->name_len = dirent->name_len;
new_fn->file_type = dirent->file_type;
memcpy(new_fn->name, dirent->name, dirent->name_len);
new_fn->name[dirent->name_len] = 0;
while (*p) {
parent = *p;
fname = rb_entry(parent, struct fname, rb_hash);
/*
* If the hash and minor hash match up, then we put
* them on a linked list. This rarely happens...
*/
if ((new_fn->hash == fname->hash) &&
(new_fn->minor_hash == fname->minor_hash)) {
new_fn->next = fname->next;
fname->next = new_fn;
return 0;
}
if (new_fn->hash < fname->hash)
p = &(*p)->rb_left;
else if (new_fn->hash > fname->hash)
p = &(*p)->rb_right;
else if (new_fn->minor_hash < fname->minor_hash)
p = &(*p)->rb_left;
else /* if (new_fn->minor_hash > fname->minor_hash) */
p = &(*p)->rb_right;
}
rb_link_node(&new_fn->rb_hash, parent, p);
rb_insert_color(&new_fn->rb_hash, &info->root);
return 0;
}
/*
* This is a helper function for ext3_dx_readdir. It calls filldir
* for all entries on the fname linked list. (Normally there is only
* one entry on the linked list, unless there are 62 bit hash collisions.)
*/
static int call_filldir(struct file * filp, void * dirent,
filldir_t filldir, struct fname *fname)
{
struct dir_private_info *info = filp->private_data;
loff_t curr_pos;
struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block * sb;
int error;
sb = inode->i_sb;
if (!fname) {
printk("call_filldir: called with null fname?!?\n");
return 0;
}
curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
fname->inode,
get_dtype(sb, fname->file_type));
if (error) {
filp->f_pos = curr_pos;
info->extra_fname = fname;
return error;
}
fname = fname->next;
}
return 0;
}
static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir)
{
struct dir_private_info *info = filp->private_data;
struct inode *inode = filp->f_path.dentry->d_inode;
struct fname *fname;
int ret;
if (!info) {
info = ext3_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
filp->private_data = info;
}
if (filp->f_pos == ext3_get_htree_eof(filp))
return 0; /* EOF */
/* Someone has messed with f_pos; reset the world */
if (info->last_pos != filp->f_pos) {
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
info->curr_hash = pos2maj_hash(filp, filp->f_pos);
info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
* If there are any leftover names on the hash collision
* chain, return them first.
*/
if (info->extra_fname) {
if (call_filldir(filp, dirent, filldir, info->extra_fname))
goto finished;
info->extra_fname = NULL;
goto next_node;
} else if (!info->curr_node)
info->curr_node = rb_first(&info->root);
while (1) {
/*
* Fill the rbtree if we have no more entries,
* or the inode has changed since we last read in the
* cached entries.
*/
if ((!info->curr_node) ||
(filp->f_version != inode->i_version)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
filp->f_version = inode->i_version;
ret = ext3_htree_fill_tree(filp, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
if (ret < 0)
return ret;
if (ret == 0) {
filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
}
fname = rb_entry(info->curr_node, struct fname, rb_hash);
info->curr_hash = fname->hash;
info->curr_minor_hash = fname->minor_hash;
if (call_filldir(filp, dirent, filldir, fname))
break;
next_node:
info->curr_node = rb_next(info->curr_node);
if (info->curr_node) {
fname = rb_entry(info->curr_node, struct fname,
rb_hash);
info->curr_hash = fname->hash;
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
info->curr_minor_hash = 0;
}
}
finished:
info->last_pos = filp->f_pos;
return 0;
}
static int ext3_release_dir (struct inode * inode, struct file * filp)
{
if (filp->private_data)
ext3_htree_free_dir_info(filp->private_data);
return 0;
}
const struct file_operations ext3_dir_operations = {
.llseek = ext3_dir_llseek,
.read = generic_read_dir,
.readdir = ext3_readdir,
.unlocked_ioctl = ext3_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext3_compat_ioctl,
#endif
.fsync = ext3_sync_file,
.release = ext3_release_dir,
};
| gpl-2.0 |
NeverLEX/linux | arch/openrisc/kernel/irq.c | 1787 | 1396 | /*
* OpenRISC irq.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/export.h>
#include <linux/irqflags.h>
/* read interrupt enabled status */
unsigned long arch_local_save_flags(void)
{
return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
}
EXPORT_SYMBOL(arch_local_save_flags);
/* set interrupt enabled status */
void arch_local_irq_restore(unsigned long flags)
{
mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
}
EXPORT_SYMBOL(arch_local_irq_restore);
void __init init_IRQ(void)
{
irqchip_init();
}
static void (*handle_arch_irq)(struct pt_regs *);
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
handle_arch_irq = handle_irq;
}
void __irq_entry do_IRQ(struct pt_regs *regs)
{
handle_arch_irq(regs);
}
| gpl-2.0 |
rdm-dev/linux-curie | arch/mips/dec/kn02xa-berr.c | 2043 | 3957 | /*
* Bus error event handling code for 5000-series systems equipped
* with parity error detection logic, i.e. DECstation/DECsystem
* 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal
* DECstation/DECsystem 5000/20, /25, /33 (KN02-CA), 5000/50
* (KN04-CA) systems.
*
* Copyright (c) 2005 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/cpu-type.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/dec/kn02ca.h>
#include <asm/dec/kn02xa.h>
#include <asm/dec/kn05.h>
static inline void dec_kn02xa_be_ack(void)
{
volatile u32 *mer = (void *)CKSEG1ADDR(KN02XA_MER);
volatile u32 *mem_intr = (void *)CKSEG1ADDR(KN02XA_MEM_INTR);
*mer = KN02CA_MER_INTR; /* Clear errors; keep the ARC IRQ. */
*mem_intr = 0; /* Any write clears the bus IRQ. */
iob();
}
static int dec_kn02xa_be_backend(struct pt_regs *regs, int is_fixup,
int invoker)
{
volatile u32 *kn02xa_mer = (void *)CKSEG1ADDR(KN02XA_MER);
volatile u32 *kn02xa_ear = (void *)CKSEG1ADDR(KN02XA_EAR);
static const char excstr[] = "exception";
static const char intstr[] = "interrupt";
static const char cpustr[] = "CPU";
static const char mreadstr[] = "memory read";
static const char readstr[] = "read";
static const char writestr[] = "write";
static const char timestr[] = "timeout";
static const char paritystr[] = "parity error";
static const char lanestat[][4] = { " OK", "BAD" };
const char *kind, *agent, *cycle, *event;
unsigned long address;
u32 mer = *kn02xa_mer;
u32 ear = *kn02xa_ear;
int action = MIPS_BE_FATAL;
/* Ack ASAP, so that any subsequent errors get caught. */
dec_kn02xa_be_ack();
kind = invoker ? intstr : excstr;
/* No DMA errors? */
agent = cpustr;
address = ear & KN02XA_EAR_ADDRESS;
/* Low 256MB is decoded as memory, high -- as TC. */
if (address < 0x10000000) {
cycle = mreadstr;
event = paritystr;
} else {
cycle = invoker ? writestr : readstr;
event = timestr;
}
if (is_fixup)
action = MIPS_BE_FIXUP;
if (action != MIPS_BE_FIXUP)
printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
kind, agent, cycle, event, address);
if (action != MIPS_BE_FIXUP && address < 0x10000000)
printk(KERN_ALERT " Byte lane status %#3x -- "
"#3: %s, #2: %s, #1: %s, #0: %s\n",
(mer & KN02XA_MER_BYTERR) >> 8,
lanestat[(mer & KN02XA_MER_BYTERR_3) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_2) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_1) != 0],
lanestat[(mer & KN02XA_MER_BYTERR_0) != 0]);
return action;
}
int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup)
{
return dec_kn02xa_be_backend(regs, is_fixup, 0);
}
irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
int action = dec_kn02xa_be_backend(regs, 0, 1);
if (action == MIPS_BE_DISCARD)
return IRQ_HANDLED;
/*
* FIXME: Find the affected processes and kill them, otherwise
* we must die.
*
* The interrupt is asynchronously delivered thus EPC and RA
* may be irrelevant, but are printed for a reference.
*/
printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
regs->cp0_epc, regs->regs[31]);
die("Unrecoverable bus error", regs);
}
void __init dec_kn02xa_be_init(void)
{
volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
/* For KN04 we need to make sure EE (?) is enabled in the MB. */
if (current_cpu_type() == CPU_R4000SC)
*mbcs |= KN4K_MB_CSR_EE;
fast_iob();
/* Clear any leftover errors from the firmware. */
dec_kn02xa_be_ack();
}
| gpl-2.0 |
BlackBox-Kernel/blackbox_tomato_lp | crypto/async_tx/async_tx.c | 2299 | 7839 | /*
* core routines for the asynchronous memory transfer/transform api
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <dan.j.williams@intel.com>
*
* with architecture considerations by:
* Neil Brown <neilb@suse.de>
* Jeff Garzik <jeff@garzik.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>
#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
async_dmaengine_get();
printk(KERN_INFO "async_tx: api initialized (async)\n");
return 0;
}
static void __exit async_tx_exit(void)
{
async_dmaengine_put();
}
module_init(async_tx_init);
module_exit(async_tx_exit);
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
enum dma_transaction_type tx_type)
{
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
/* see if we can keep the chain on one channel */
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
return depend_tx->chan;
return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
/**
* async_tx_channel_switch - queue an interrupt descriptor with a dependency
* pre-attached.
* @depend_tx: the operation that must finish before the new operation runs
* @tx: the new operation
*/
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx)
{
struct dma_chan *chan = depend_tx->chan;
struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
/* first check to see if we can still append to depend_tx */
txd_lock(depend_tx);
if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
txd_chain(depend_tx, tx);
intr_tx = NULL;
}
txd_unlock(depend_tx);
/* attached dependency, flush the parent channel */
if (!intr_tx) {
device->device_issue_pending(chan);
return;
}
/* see if we can schedule an interrupt
* otherwise poll for completion
*/
if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
intr_tx = device->device_prep_dma_interrupt(chan, 0);
else
intr_tx = NULL;
if (intr_tx) {
intr_tx->callback = NULL;
intr_tx->callback_param = NULL;
/* safe to chain outside the lock since we know we are
* not submitted yet
*/
txd_chain(intr_tx, tx);
/* check if we need to append */
txd_lock(depend_tx);
if (txd_parent(depend_tx)) {
txd_chain(depend_tx, intr_tx);
async_tx_ack(intr_tx);
intr_tx = NULL;
}
txd_unlock(depend_tx);
if (intr_tx) {
txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
}
}
/**
* submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
*
* while holding depend_tx->lock we must avoid submitting new operations
* to prevent a circular locking dependency with drivers that already
* hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
ASYNC_TX_SUBMITTED,
ASYNC_TX_CHANNEL_SWITCH,
ASYNC_TX_DIRECT_SUBMIT,
};
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
tx->callback = submit->cb_fn;
tx->callback_param = submit->cb_param;
if (depend_tx) {
enum submit_disposition s;
/* sanity check the dependency chain:
* 1/ if ack is already set then we cannot be sure
* we are referring to the correct operation
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
*/
txd_lock(depend_tx);
if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly
* if we are staying on the same channel: append
* else: channel switch
*/
if (depend_tx->chan == chan) {
txd_chain(depend_tx, tx);
s = ASYNC_TX_SUBMITTED;
} else
s = ASYNC_TX_CHANNEL_SWITCH;
} else {
/* we do not have a parent so we may be able to submit
* directly if we are staying on the same channel
*/
if (depend_tx->chan == chan)
s = ASYNC_TX_DIRECT_SUBMIT;
else
s = ASYNC_TX_CHANNEL_SWITCH;
}
txd_unlock(depend_tx);
switch (s) {
case ASYNC_TX_SUBMITTED:
break;
case ASYNC_TX_CHANNEL_SWITCH:
async_tx_channel_switch(depend_tx, tx);
break;
case ASYNC_TX_DIRECT_SUBMIT:
txd_clear_parent(tx);
tx->tx_submit(tx);
break;
}
} else {
txd_clear_parent(tx);
tx->tx_submit(tx);
}
if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx);
if (depend_tx)
async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
* async_trigger_callback - schedules the callback function to be run
* @submit: submission and completion parameters
*
* honored flags: ASYNC_TX_ACK
*
* The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) {
chan = depend_tx->chan;
device = chan->device;
/* see if we can schedule an interrupt
* otherwise poll for completion
*/
if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
device = NULL;
tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
} else
tx = NULL;
if (tx) {
pr_debug("%s: (async)\n", __func__);
async_tx_submit(chan, tx, submit);
} else {
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
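/*
* Editor's sketch (hypothetical callback and context names): a user
* that only needs notification once its dependency chain has drained
* can request a pure callback operation:
*
*     struct async_submit_ctl submit;
*
*     init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
*                       chain_done_cb, ctx, NULL);
*     async_trigger_callback(&submit);
*/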
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
* @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
if (*tx) {
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
*tx = NULL;
}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jeboo/kernel_JB_ZSLS6_i9100 | arch/arm/mach-omap2/sdrc2xxx.c | 3067 | 4386 | /*
* linux/arch/arm/mach-omap2/sdrc2xxx.c
*
* SDRAM timing related functions for OMAP2xxx
*
* Copyright (C) 2005, 2008 Texas Instruments Inc.
* Copyright (C) 2005, 2008 Nokia Corporation
*
* Tony Lindgren <tony@atomide.com>
* Paul Walmsley
* Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <plat/common.h>
#include <plat/clock.h>
#include <plat/sram.h>
#include "prm2xxx_3xxx.h"
#include "clock.h"
#include <plat/sdrc.h>
#include "sdrc.h"
/* Memory timing, DLL mode flags */
#define M_DDR 1
#define M_LOCK_CTRL (1 << 2)
#define M_UNLOCK 0
#define M_LOCK 1
static struct memory_timings mem_timings;
static u32 curr_perf_level = CORE_CLK_SRC_DPLL_X2;
static u32 omap2xxx_sdrc_get_slow_dll_ctrl(void)
{
return mem_timings.slow_dll_ctrl;
}
static u32 omap2xxx_sdrc_get_fast_dll_ctrl(void)
{
return mem_timings.fast_dll_ctrl;
}
static u32 omap2xxx_sdrc_get_type(void)
{
return mem_timings.m_type;
}
/*
* Check the DLL lock state, and return true if running in unlock mode.
* This is needed to compensate for the shifted DLL value in unlock mode.
*/
u32 omap2xxx_sdrc_dll_is_unlocked(void)
{
/* dlla and dllb are a set */
u32 dll_state = sdrc_read_reg(SDRC_DLLA_CTRL);
if ((dll_state & (1 << 2)) == (1 << 2))
return 1;
else
return 0;
}
/*
* 'level' is the value to store to CM_CLKSEL2_PLL.CORE_CLK_SRC.
* Practical values are CORE_CLK_SRC_DPLL (for CORE_CLK = DPLL_CLK) or
* CORE_CLK_SRC_DPLL_X2 (for CORE_CLK = DPLL_CLK * 2)
*
* Used by the clock framework during CORE DPLL changes
*/
u32 omap2xxx_sdrc_reprogram(u32 level, u32 force)
{
u32 dll_ctrl, m_type;
u32 prev = curr_perf_level;
unsigned long flags;
if ((curr_perf_level == level) && !force)
return prev;
if (level == CORE_CLK_SRC_DPLL)
dll_ctrl = omap2xxx_sdrc_get_slow_dll_ctrl();
else if (level == CORE_CLK_SRC_DPLL_X2)
dll_ctrl = omap2xxx_sdrc_get_fast_dll_ctrl();
else
return prev;
m_type = omap2xxx_sdrc_get_type();
local_irq_save(flags);
/*
* XXX These calls should be abstracted out through a
* prm2xxx.c function
*/
if (cpu_is_omap2420())
__raw_writel(0xffff, OMAP2420_PRCM_VOLTSETUP);
else
__raw_writel(0xffff, OMAP2430_PRCM_VOLTSETUP);
omap2_sram_reprogram_sdrc(level, dll_ctrl, m_type);
curr_perf_level = level;
local_irq_restore(flags);
return prev;
}
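/*
* Editor's example (hypothetical caller): a CORE DPLL rate-change path
* would pass the target clock source and keep the returned previous
* level so it can be restored if the change has to be undone, e.g.
*
*     prev = omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 0);
*     if (rate_change_failed)
*             omap2xxx_sdrc_reprogram(prev, 1);
*/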
/* Used by the clock framework during CORE DPLL changes */
void omap2xxx_sdrc_init_params(u32 force_lock_to_unlock_mode)
{
unsigned long dll_cnt;
u32 fast_dll = 0;
/* DDR = 1, SDR = 0 */
mem_timings.m_type = !((sdrc_read_reg(SDRC_MR_0) & 0x3) == 0x1);
/* 2422 es2.05 and beyond have a single SIP DDR instead of 2 like others.
* In the case of 2422, it's OK to use CS1 instead of CS0.
*/
if (cpu_is_omap2422())
mem_timings.base_cs = 1;
else
mem_timings.base_cs = 0;
if (mem_timings.m_type != M_DDR)
return;
/* With DDR we need to determine the low frequency DLL value */
if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
mem_timings.dll_mode = M_UNLOCK;
else
mem_timings.dll_mode = M_LOCK;
if (mem_timings.base_cs == 0) {
fast_dll = sdrc_read_reg(SDRC_DLLA_CTRL);
dll_cnt = sdrc_read_reg(SDRC_DLLA_STATUS) & 0xff00;
} else {
fast_dll = sdrc_read_reg(SDRC_DLLB_CTRL);
dll_cnt = sdrc_read_reg(SDRC_DLLB_STATUS) & 0xff00;
}
if (force_lock_to_unlock_mode) {
fast_dll &= ~0xff00;
fast_dll |= dll_cnt; /* Current lock mode */
}
/* set fast timings with DLL filter disabled */
mem_timings.fast_dll_ctrl = (fast_dll | (3 << 8));
/* No disruptions, DDR will be offline & C-ABI not followed */
omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
mem_timings.fast_dll_ctrl,
mem_timings.base_cs,
force_lock_to_unlock_mode);
mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */
/* Turn status into unlock ctrl */
mem_timings.slow_dll_ctrl |=
((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));
/* 90 degree phase for anything below 133MHz + disable DLL filter */
mem_timings.slow_dll_ctrl |= ((1 << 1) | (3 << 8));
}
| gpl-2.0 |
nspierbundel/amlogic-common | drivers/media/video/uvc/uvc_isight.c | 3323 | 3953 | /*
* uvc_isight.c -- USB Video Class driver - iSight support
*
* Copyright (C) 2006-2007
* Ivan N. Zlatev <contact@i-nz.net>
* Copyright (C) 2008-2009
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include "uvcvideo.h"
/* Built-in iSight webcams implement most of UVC 1.0, except that they
* use a different packet format. Instead of sending a header at the
* beginning of each isochronous transfer payload, the webcam sends a
* single header per image (on its own in a packet), followed by
* packets containing data only.
*
* Offset Size (bytes) Description
* ------------------------------------------------------------------
* 0x00 1 Header length
* 0x01 1 Flags (UVC-compliant)
* 0x02 4 Always equal to '11223344'
* 0x06 8 Always equal to 'deadbeefdeadface'
* 0x0e 16 Unknown
*
* The header can be prefixed by an optional, unknown-purpose byte.
*/
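/*
* Editor's illustration (hypothetical byte values): a header packet
* would therefore start with something like
*
*     1e 8c 11 22 33 44 de ad be ef de ad fa ce ...
*
* i.e. header length, UVC flags, then the magic sequences that
* isight_decode() matches at offset 2 (or offset 3 when the optional
* prefix byte is present).
*/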
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
const __u8 *data, unsigned int len)
{
static const __u8 hdr[] = {
0x11, 0x22, 0x33, 0x44,
0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xfa, 0xce
};
unsigned int maxlen, nbytes;
__u8 *mem;
int is_header = 0;
if (buf == NULL)
return 0;
if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
(len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
is_header = 1;
}
/* Synchronize to the input stream by waiting for a header packet. */
if (buf->state != UVC_BUF_STATE_ACTIVE) {
if (!is_header) {
uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
"sync).\n");
return 0;
}
buf->state = UVC_BUF_STATE_ACTIVE;
}
/* Mark the buffer as done if we're at the beginning of a new frame.
*
* Empty buffers (bytesused == 0) don't trigger end of frame detection
* as it doesn't make sense to return an empty buffer.
*/
if (is_header && buf->buf.bytesused != 0) {
buf->state = UVC_BUF_STATE_DONE;
return -EAGAIN;
}
/* Copy the video data to the buffer. Skip header packets, as they
* contain no data.
*/
if (!is_header) {
maxlen = buf->buf.length - buf->buf.bytesused;
mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
nbytes = min(len, maxlen);
memcpy(mem, data, nbytes);
buf->buf.bytesused += nbytes;
if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete "
"(overflow).\n");
buf->state = UVC_BUF_STATE_DONE;
}
}
return 0;
}
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf)
{
int ret, i;
for (i = 0; i < urb->number_of_packets; ++i) {
if (urb->iso_frame_desc[i].status < 0) {
uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
"lost (%d).\n",
urb->iso_frame_desc[i].status);
}
/* Decode the payload packet.
* uvc_video_decode is entered twice when a frame transition
* has been detected because the end of frame can only be
* reliably detected when the first packet of the new frame
* is processed. The first pass detects the transition and
* closes the previous frame's buffer, the second pass
* processes the data of the first payload of the new frame.
*/
do {
ret = isight_decode(&stream->queue, buf,
urb->transfer_buffer +
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
if (buf == NULL)
break;
if (buf->state == UVC_BUF_STATE_DONE ||
buf->state == UVC_BUF_STATE_ERROR)
buf = uvc_queue_next_buffer(&stream->queue,
buf);
} while (ret == -EAGAIN);
}
}
| gpl-2.0 |
upndwn4par/android_kernel_lge_hammerhead | fs/nfs/nfs4filelayout.c | 3835 | 31034 | /*
* Module for the pnfs nfs4 file layout driver.
* Defines all I/O and Policy interface operations, plus code
* to register itself with the pNFS client.
*
* Copyright (c) 2002
* The Regents of the University of Michigan
* All Rights Reserved
*
* Dean Hildebrand <dhildebz@umich.edu>
*
* Permission is granted to use, copy, create derivative works, and
* redistribute this software and such derivative works for any purpose,
* so long as the name of the University of Michigan is not used in
* any advertising or publicity pertaining to the use or distribution
* of this software without specific, written prior authorization. If
* the above copyright notice or any other identification of the
* University of Michigan is included in any copy of any portion of
* this software, then the disclaimer below must also be included.
*
* This software is provided as is, without representation or warranty
* of any kind either express or implied, including without limitation
* the implied warranties of merchantability, fitness for a particular
* purpose, or noninfringement. The Regents of the University of
* Michigan shall not be liable for any damages, including special,
* indirect, incidental, or consequential damages, with respect to any
* claim arising out of or in connection with the use of the software,
* even if it has been or is hereafter advised of the possibility of
* such damages.
*/
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sunrpc/metrics.h>
#include "internal.h"
#include "delegation.h"
#include "nfs4filelayout.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");
#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)
static loff_t
filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
loff_t offset)
{
u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
u64 stripe_no;
u32 rem;
offset -= flseg->pattern_offset;
stripe_no = div_u64(offset, stripe_width);
div_u64_rem(offset, flseg->stripe_unit, &rem);
return stripe_no * flseg->stripe_unit + rem;
}
/* This function is used by the layout driver to calculate the
* offset of the file on the dserver based on whether the
* layout type is STRIPE_DENSE or STRIPE_SPARSE
*/
static loff_t
filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
{
struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
switch (flseg->stripe_type) {
case STRIPE_SPARSE:
return offset;
case STRIPE_DENSE:
return filelayout_get_dense_offset(flseg, offset);
}
BUG();
}
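/*
* Editor's worked example (hypothetical numbers): with a 64k stripe
* unit, a stripe_count of 4 and pattern_offset 0, a file offset of
* 300k falls in stripe_no = 300k / 256k = 1 with rem = 300k % 64k =
* 44k, so STRIPE_DENSE maps it to DS offset 1 * 64k + 44k = 108k,
* while STRIPE_SPARSE keeps the original 300k.
*/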
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
int *reset)
{
struct nfs_server *mds_server = NFS_SERVER(state->inode);
struct nfs_client *mds_client = mds_server->nfs_client;
if (task->tk_status >= 0)
return 0;
*reset = 0;
switch (task->tk_status) {
/* MDS state errors */
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs_remove_bad_delegation(state->inode);
case -NFS4ERR_OPENMODE:
nfs4_schedule_stateid_recovery(mds_server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
nfs4_schedule_stateid_recovery(mds_server, state);
nfs4_schedule_lease_recovery(mds_client);
goto wait_on_recovery;
/* DS session errors */
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags);
nfs4_schedule_session_recovery(clp->cl_session);
break;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
case -EKEYEXPIRED:
rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
default:
dprintk("%s DS error. Retry through MDS %d\n", __func__,
task->tk_status);
*reset = 1;
break;
}
out:
task->tk_status = 0;
return -EAGAIN;
wait_on_recovery:
rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
goto out;
}
/* NFS_PROTO call done callback routines */
static int filelayout_read_done_cb(struct rpc_task *task,
struct nfs_read_data *data)
{
int reset = 0;
dprintk("%s DS read\n", __func__);
if (filelayout_async_handle_error(task, data->args.context->state,
data->ds_clp, &reset) == -EAGAIN) {
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
pnfs_set_lo_fail(data->lseg);
nfs4_reset_read(task, data);
}
rpc_restart_call_prepare(task);
return -EAGAIN;
}
return 0;
}
/*
* We reference the rpc_cred of the first WRITE that triggers the need for
* a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
* rfc5661 is not clear about which credential should be used.
*/
static void
filelayout_set_layoutcommit(struct nfs_write_data *wdata)
{
if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
wdata->res.verf->committed == NFS_FILE_SYNC)
return;
pnfs_set_layoutcommit(wdata);
dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
(unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
}
/*
* Call ops for the async read/write cases
* In the case of dense layouts, the offset needs to be reset to its
* original value.
*/
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
struct nfs_read_data *rdata = (struct nfs_read_data *)data;
rdata->read_done_cb = filelayout_read_done_cb;
if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
&rdata->args.seq_args, &rdata->res.seq_res,
task))
return;
rpc_call_start(task);
}
static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
struct nfs_read_data *rdata = (struct nfs_read_data *)data;
dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
/* Note this may cause RPC to be resent */
rdata->mds_ops->rpc_call_done(task, data);
}
static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
struct nfs_read_data *rdata = (struct nfs_read_data *)data;
rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
}
static void filelayout_read_release(void *data)
{
struct nfs_read_data *rdata = (struct nfs_read_data *)data;
put_lseg(rdata->lseg);
rdata->mds_ops->rpc_release(data);
}
static int filelayout_write_done_cb(struct rpc_task *task,
struct nfs_write_data *data)
{
int reset = 0;
if (filelayout_async_handle_error(task, data->args.context->state,
data->ds_clp, &reset) == -EAGAIN) {
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
pnfs_set_lo_fail(data->lseg);
nfs4_reset_write(task, data);
}
rpc_restart_call_prepare(task);
return -EAGAIN;
}
filelayout_set_layoutcommit(data);
return 0;
}
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
static void prepare_to_resend_writes(struct nfs_write_data *data)
{
struct nfs_page *first = nfs_list_entry(data->pages.next);
data->task.tk_status = 0;
memcpy(data->verf.verifier, first->wb_verf.verifier,
sizeof(first->wb_verf.verifier));
data->verf.verifier[0]++; /* ensure verifier mismatch */
}
static int filelayout_commit_done_cb(struct rpc_task *task,
struct nfs_write_data *data)
{
int reset = 0;
if (filelayout_async_handle_error(task, data->args.context->state,
data->ds_clp, &reset) == -EAGAIN) {
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
prepare_to_resend_writes(data);
pnfs_set_lo_fail(data->lseg);
} else
rpc_restart_call_prepare(task);
return -EAGAIN;
}
return 0;
}
static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)data;
if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
&wdata->args.seq_args, &wdata->res.seq_res,
task))
return;
rpc_call_start(task);
}
static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)data;
/* Note this may cause RPC to be resent */
wdata->mds_ops->rpc_call_done(task, data);
}
static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)data;
rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
}
static void filelayout_write_release(void *data)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)data;
put_lseg(wdata->lseg);
wdata->mds_ops->rpc_release(data);
}
static void filelayout_commit_release(void *data)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)data;
nfs_commit_release_pages(wdata);
if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
nfs_commit_clear_lock(NFS_I(wdata->inode));
put_lseg(wdata->lseg);
nfs_commitdata_release(wdata);
}
static const struct rpc_call_ops filelayout_read_call_ops = {
.rpc_call_prepare = filelayout_read_prepare,
.rpc_call_done = filelayout_read_call_done,
.rpc_count_stats = filelayout_read_count_stats,
.rpc_release = filelayout_read_release,
};
static const struct rpc_call_ops filelayout_write_call_ops = {
.rpc_call_prepare = filelayout_write_prepare,
.rpc_call_done = filelayout_write_call_done,
.rpc_count_stats = filelayout_write_count_stats,
.rpc_release = filelayout_write_release,
};
static const struct rpc_call_ops filelayout_commit_call_ops = {
.rpc_call_prepare = filelayout_write_prepare,
.rpc_call_done = filelayout_write_call_done,
.rpc_count_stats = filelayout_write_count_stats,
.rpc_release = filelayout_commit_release,
};
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
int status;
dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
__func__, data->inode->i_ino,
data->args.pgbase, (size_t)data->args.count, offset);
if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
return PNFS_NOT_ATTEMPTED;
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
if (!ds) {
/* Either layout fh index faulty, or ds connect failed */
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
return PNFS_NOT_ATTEMPTED;
}
dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
/* No multipath support. Use first DS */
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
data->args.fh = fh;
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
data->mds_offset = offset;
/* Perform an asynchronous read to ds */
status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
&filelayout_read_call_ops);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
/* Perform async writes. */
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_write_data *data, int sync)
{
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
int status;
if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
return PNFS_NOT_ATTEMPTED;
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
if (!ds) {
printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
__func__);
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
return PNFS_NOT_ATTEMPTED;
}
dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
data->inode->i_ino, sync, (size_t) data->args.count, offset,
ds->ds_remotestr);
data->write_done_cb = filelayout_write_done_cb;
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
data->args.fh = fh;
/*
* Get the file offset on the dserver. Set the write offset to
* this offset and save the original offset.
*/
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
&filelayout_write_call_ops, sync);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
/*
* filelayout_check_layout()
*
* Make sure layout segment parameters are sane WRT the device.
* At this point no generic layer initialization of the lseg has occurred,
* and nothing has been added to the layout_hdr cache.
*
*/
static int
filelayout_check_layout(struct pnfs_layout_hdr *lo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
struct nfs4_deviceid *id,
gfp_t gfp_flags)
{
struct nfs4_deviceid_node *d;
struct nfs4_file_layout_dsaddr *dsaddr;
int status = -EINVAL;
struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
dprintk("--> %s\n", __func__);
/* FIXME: remove this check when layout segment support is added */
if (lgr->range.offset != 0 ||
lgr->range.length != NFS4_MAX_UINT64) {
dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
__func__);
goto out;
}
if (fl->pattern_offset > lgr->range.offset) {
dprintk("%s pattern_offset %lld too large\n",
__func__, fl->pattern_offset);
goto out;
}
if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) {
dprintk("%s Invalid stripe unit (%u)\n",
__func__, fl->stripe_unit);
goto out;
}
/* find and reference the deviceid */
d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
NFS_SERVER(lo->plh_inode)->nfs_client, id);
if (d == NULL) {
dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
if (dsaddr == NULL)
goto out;
} else
dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
/* Found deviceid is being reaped */
if (test_bit(NFS_DEVICEID_INVALID, &dsaddr->id_node.flags))
goto out_put;
fl->dsaddr = dsaddr;
if (fl->first_stripe_index >= dsaddr->stripe_count) {
dprintk("%s Bad first_stripe_index %u\n",
__func__, fl->first_stripe_index);
goto out_put;
}
if ((fl->stripe_type == STRIPE_SPARSE &&
fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
(fl->stripe_type == STRIPE_DENSE &&
fl->num_fh != dsaddr->stripe_count)) {
dprintk("%s num_fh %u not valid for given packing\n",
__func__, fl->num_fh);
goto out_put;
}
if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
dprintk("%s Stripe unit (%u) not aligned with rsize %u "
"wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
nfss->wsize);
}
status = 0;
out:
dprintk("--> %s returns %d\n", __func__, status);
return status;
out_put:
nfs4_fl_put_deviceid(dsaddr);
goto out;
}
static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
{
int i;
for (i = 0; i < fl->num_fh; i++) {
if (!fl->fh_array[i])
break;
kfree(fl->fh_array[i]);
}
kfree(fl->fh_array);
fl->fh_array = NULL;
}
static void
_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
{
filelayout_free_fh_array(fl);
kfree(fl);
}
static int
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
struct nfs4_deviceid *id,
gfp_t gfp_flags)
{
struct xdr_stream stream;
struct xdr_buf buf;
struct page *scratch;
__be32 *p;
uint32_t nfl_util;
int i;
dprintk("%s: set_layout_map Begin\n", __func__);
scratch = alloc_page(gfp_flags);
if (!scratch)
return -ENOMEM;
xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
/* 20 = nfl_util (4), first_stripe_index (4), pattern_offset (8),
* num_fh (4) */
p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
if (unlikely(!p))
goto out_err;
memcpy(id, p, sizeof(*id));
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
nfs4_print_deviceid(id);
nfl_util = be32_to_cpup(p++);
if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
fl->commit_through_mds = 1;
if (nfl_util & NFL4_UFLG_DENSE)
fl->stripe_type = STRIPE_DENSE;
else
fl->stripe_type = STRIPE_SPARSE;
fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;
fl->first_stripe_index = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &fl->pattern_offset);
fl->num_fh = be32_to_cpup(p++);
dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
__func__, nfl_util, fl->num_fh, fl->first_stripe_index,
fl->pattern_offset);
/* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
* Further checking is done in filelayout_check_layout */
if (fl->num_fh >
max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
goto out_err;
if (fl->num_fh > 0) {
fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
gfp_flags);
if (!fl->fh_array)
goto out_err;
}
for (i = 0; i < fl->num_fh; i++) {
/* Do we want to use a mempool here? */
fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
if (!fl->fh_array[i])
goto out_err_free;
p = xdr_inline_decode(&stream, 4);
if (unlikely(!p))
goto out_err_free;
fl->fh_array[i]->size = be32_to_cpup(p++);
if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
printk(KERN_ERR "NFS: Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
goto out_err_free;
}
p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
if (unlikely(!p))
goto out_err_free;
memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
dprintk("DEBUG: %s: fh len %d\n", __func__,
fl->fh_array[i]->size);
}
__free_page(scratch);
return 0;
out_err_free:
filelayout_free_fh_array(fl);
out_err:
__free_page(scratch);
return -EIO;
}
static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
nfs4_fl_put_deviceid(fl->dsaddr);
kfree(fl->commit_buckets);
_filelayout_free_lseg(fl);
}
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
struct nfs4_layoutget_res *lgr,
gfp_t gfp_flags)
{
struct nfs4_filelayout_segment *fl;
int rc;
struct nfs4_deviceid id;
dprintk("--> %s\n", __func__);
fl = kzalloc(sizeof(*fl), gfp_flags);
if (!fl)
return NULL;
rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
_filelayout_free_lseg(fl);
return NULL;
}
/* This assumes there is only one IOMODE_RW lseg. What
* we really want to do is have a layout_hdr level
* dictionary of <multipath_list4, fh> keys, each
* associated with a struct list_head, populated by calls
* to filelayout_write_pagelist().
* */
if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
int i;
int size = (fl->stripe_type == STRIPE_SPARSE) ?
fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
if (!fl->commit_buckets) {
filelayout_free_lseg(&fl->generic_hdr);
return NULL;
}
fl->number_of_buckets = size;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&fl->commit_buckets[i].written);
INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
}
}
return &fl->generic_hdr;
}
/*
* filelayout_pg_test(). Called by nfs_can_coalesce_requests()
*
* return true : coalesce page
* return false : don't coalesce page
*/
static bool
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
struct nfs_page *req)
{
u64 p_stripe, r_stripe;
u32 stripe_unit;
if (!pnfs_generic_pg_test(pgio, prev, req) ||
!nfs_generic_pg_test(pgio, prev, req))
return false;
p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
do_div(p_stripe, stripe_unit);
do_div(r_stripe, stripe_unit);
return (p_stripe == r_stripe);
}
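/*
* Editor's example (hypothetical numbers): with a 64k stripe unit,
* pages at file offsets 60k and 68k land in stripe units 0 and 1 and
* are not coalesced, while pages at 64k and 68k both land in unit 1
* and may be coalesced into one request.
*/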
static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
BUG_ON(pgio->pg_lseg != NULL);
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_READ,
GFP_KERNEL);
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_read_mds(pgio);
}
static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
BUG_ON(pgio->pg_lseg != NULL);
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
GFP_NOFS);
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_write_mds(pgio);
}
static const struct nfs_pageio_ops filelayout_pg_read_ops = {
.pg_init = filelayout_pg_init_read,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops filelayout_pg_write_ops = {
.pg_init = filelayout_pg_init_write,
.pg_test = filelayout_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
};
static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
{
if (fl->stripe_type == STRIPE_SPARSE)
return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
else
return j;
}
/* The generic layer is about to remove the req from the commit list.
* If this will make the bucket empty, it will need to put the lseg reference.
*/
static void
filelayout_clear_request_commit(struct nfs_page *req)
{
struct pnfs_layout_segment *freeme = NULL;
struct inode *inode = req->wb_context->dentry->d_inode;
spin_lock(&inode->i_lock);
if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
goto out;
if (list_is_singular(&req->wb_list)) {
struct pnfs_layout_segment *lseg;
/* From here we can find the bucket, but for the moment,
* since there is only one relevant lseg...
*/
list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
if (lseg->pls_range.iomode == IOMODE_RW) {
freeme = lseg;
break;
}
}
}
out:
nfs_request_remove_commit_list(req);
spin_unlock(&inode->i_lock);
put_lseg(freeme);
}
static struct list_head *
filelayout_choose_commit_list(struct nfs_page *req,
struct pnfs_layout_segment *lseg)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
u32 i, j;
struct list_head *list;
if (fl->commit_through_mds)
return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
/* Note that we are calling nfs4_fl_calc_j_index on each page
* that ends up being committed to a data server. An attractive
* alternative is to add a field to nfs_write_data and nfs_page
* to store the value calculated in filelayout_write_pagelist
* and just use that here.
*/
j = nfs4_fl_calc_j_index(lseg,
(loff_t)req->wb_index << PAGE_CACHE_SHIFT);
i = select_bucket_index(fl, j);
list = &fl->commit_buckets[i].written;
if (list_empty(list)) {
/* Non-empty buckets hold a reference on the lseg. That ref
* is normally transferred to the COMMIT call and released
* there. It could also be released if the last req is pulled
* off due to a rewrite, in which case it will be done in
* filelayout_remove_commit_req
*/
get_lseg(lseg);
}
set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
return list;
}
static void
filelayout_mark_request_commit(struct nfs_page *req,
struct pnfs_layout_segment *lseg)
{
struct list_head *list;
list = filelayout_choose_commit_list(req, lseg);
nfs_request_add_commit_list(req, list);
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
if (flseg->stripe_type == STRIPE_SPARSE)
return i;
else
return nfs4_fl_calc_ds_index(lseg, i);
}
static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
if (flseg->stripe_type == STRIPE_SPARSE) {
if (flseg->num_fh == 1)
i = 0;
else if (flseg->num_fh == 0)
/* Use the MDS OPEN fh set in nfs_read_rpcsetup */
return NULL;
}
return flseg->fh_array[i];
}
static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
{
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
u32 idx;
struct nfs_fh *fh;
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
ds = nfs4_fl_prepare_ds(lseg, idx);
if (!ds) {
printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
__func__);
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
prepare_to_resend_writes(data);
filelayout_commit_release(data);
return -EAGAIN;
}
dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
data->write_done_cb = filelayout_commit_done_cb;
data->ds_clp = ds->ds_clp;
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
data->args.fh = fh;
return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
&filelayout_commit_call_ops, how);
}
/*
* This is only useful while we are using whole file layouts.
*/
static struct pnfs_layout_segment *
find_only_write_lseg_locked(struct inode *inode)
{
struct pnfs_layout_segment *lseg;
list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
if (lseg->pls_range.iomode == IOMODE_RW)
return lseg;
return NULL;
}
static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
{
struct pnfs_layout_segment *rv;
spin_lock(&inode->i_lock);
rv = find_only_write_lseg_locked(inode);
if (rv)
get_lseg(rv);
spin_unlock(&inode->i_lock);
return rv;
}
static int
filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
spinlock_t *lock)
{
struct list_head *src = &bucket->written;
struct list_head *dst = &bucket->committing;
struct nfs_page *req, *tmp;
int ret = 0;
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
if (cond_resched_lock(lock))
list_safe_reset_next(req, tmp, wb_list);
nfs_request_remove_commit_list(req);
clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
nfs_list_add_request(req, dst);
ret++;
if (ret == max)
break;
}
return ret;
}
/* Move reqs from written to committing lists, returning count of number moved.
* Note called with i_lock held.
*/
static int filelayout_scan_commit_lists(struct inode *inode, int max,
spinlock_t *lock)
{
struct pnfs_layout_segment *lseg;
struct nfs4_filelayout_segment *fl;
int i, rv = 0, cnt;
lseg = find_only_write_lseg_locked(inode);
if (!lseg)
goto out_done;
fl = FILELAYOUT_LSEG(lseg);
if (fl->commit_through_mds)
goto out_done;
for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
max, lock);
max -= cnt;
rv += cnt;
}
out_done:
return rv;
}
static unsigned int
alloc_ds_commits(struct inode *inode, struct list_head *list)
{
struct pnfs_layout_segment *lseg;
struct nfs4_filelayout_segment *fl;
struct nfs_write_data *data;
int i, j;
unsigned int nreq = 0;
/* Won't need this when non-whole file layout segments are supported
* instead we will use a pnfs_layout_hdr structure */
lseg = find_only_write_lseg(inode);
if (!lseg)
return 0;
fl = FILELAYOUT_LSEG(lseg);
for (i = 0; i < fl->number_of_buckets; i++) {
if (list_empty(&fl->commit_buckets[i].committing))
continue;
data = nfs_commitdata_alloc();
if (!data)
break;
data->ds_commit_index = i;
data->lseg = lseg;
list_add(&data->pages, list);
nreq++;
}
/* Clean up on error */
for (j = i; j < fl->number_of_buckets; j++) {
if (list_empty(&fl->commit_buckets[i].committing))
continue;
nfs_retry_commit(&fl->commit_buckets[i].committing, lseg);
put_lseg(lseg); /* associated with emptying bucket */
}
put_lseg(lseg);
/* Caller will clean up entries put on list */
return nreq;
}
/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int how)
{
struct nfs_write_data *data, *tmp;
LIST_HEAD(list);
unsigned int nreq = 0;
if (!list_empty(mds_pages)) {
data = nfs_commitdata_alloc();
if (data != NULL) {
data->lseg = NULL;
list_add(&data->pages, &list);
nreq++;
} else
nfs_retry_commit(mds_pages, NULL);
}
nreq += alloc_ds_commits(inode, &list);
if (nreq == 0) {
nfs_commit_clear_lock(NFS_I(inode));
goto out;
}
atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
list_for_each_entry_safe(data, tmp, &list, pages) {
list_del_init(&data->pages);
if (!data->lseg) {
nfs_init_commit(data, mds_pages, NULL);
nfs_initiate_commit(data, NFS_CLIENT(inode),
data->mds_ops, how);
} else {
nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
filelayout_initiate_commit(data, how);
}
}
out:
return PNFS_ATTEMPTED;
}
static void
filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
{
nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_read_ops = &filelayout_pg_read_ops,
.pg_write_ops = &filelayout_pg_write_ops,
.mark_request_commit = filelayout_mark_request_commit,
.clear_request_commit = filelayout_clear_request_commit,
.scan_commit_lists = filelayout_scan_commit_lists,
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
.free_deviceid_node = filelayout_free_deveiceid_node,
};
static int __init nfs4filelayout_init(void)
{
printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
__func__);
return pnfs_register_layoutdriver(&filelayout_type);
}
static void __exit nfs4filelayout_exit(void)
{
printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
__func__);
pnfs_unregister_layoutdriver(&filelayout_type);
}
MODULE_ALIAS("nfs-layouttype4-1");
module_init(nfs4filelayout_init);
module_exit(nfs4filelayout_exit);
| gpl-2.0 |
zlaja/android_kernel_lge_msm8610 | drivers/net/ethernet/intel/ixgbevf/ethtool.c | 4859 | 21031 | /*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for ixgbevf */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include "ixgbevf.h"
#define IXGBE_ALL_RAR_ENTRIES 16
#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
int base_stat_offset;
int saved_reset_offset;
};
#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
offsetof(struct ixgbevf_adapter, m), \
offsetof(struct ixgbevf_adapter, b), \
offsetof(struct ixgbevf_adapter, r)
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
stats.saved_reset_vfgprc)},
{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
stats.saved_reset_vfgptc)},
{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
zero_base)},
{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
ethtool_cmd_speed_set(
ecmd,
(link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
SPEED_10000 : SPEED_1000);
ecmd->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
return 0;
}
static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
static char *ixgbevf_reg_names[] = {
"IXGBE_VFCTRL",
"IXGBE_VFSTATUS",
"IXGBE_VFLINKS",
"IXGBE_VFRXMEMWRAP",
"IXGBE_VFFRTIMER",
"IXGBE_VTEICR",
"IXGBE_VTEICS",
"IXGBE_VTEIMS",
"IXGBE_VTEIMC",
"IXGBE_VTEIAC",
"IXGBE_VTEIAM",
"IXGBE_VTEITR",
"IXGBE_VTIVAR",
"IXGBE_VTIVAR_MISC",
"IXGBE_VFRDBAL0",
"IXGBE_VFRDBAL1",
"IXGBE_VFRDBAH0",
"IXGBE_VFRDBAH1",
"IXGBE_VFRDLEN0",
"IXGBE_VFRDLEN1",
"IXGBE_VFRDH0",
"IXGBE_VFRDH1",
"IXGBE_VFRDT0",
"IXGBE_VFRDT1",
"IXGBE_VFRXDCTL0",
"IXGBE_VFRXDCTL1",
"IXGBE_VFSRRCTL0",
"IXGBE_VFSRRCTL1",
"IXGBE_VFPSRTYPE",
"IXGBE_VFTDBAL0",
"IXGBE_VFTDBAL1",
"IXGBE_VFTDBAH0",
"IXGBE_VFTDBAH1",
"IXGBE_VFTDLEN0",
"IXGBE_VFTDLEN1",
"IXGBE_VFTDH0",
"IXGBE_VFTDH1",
"IXGBE_VFTDT0",
"IXGBE_VFTDT1",
"IXGBE_VFTXDCTL0",
"IXGBE_VFTXDCTL1",
"IXGBE_VFTDWBAL0",
"IXGBE_VFTDWBAL1",
"IXGBE_VFTDWBAH0",
"IXGBE_VFTDWBAH1"
};
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
struct ethtool_regs *regs,
void *p)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u32 regs_len = ixgbevf_get_regs_len(netdev);
u8 i;
memset(p, 0, regs_len);
regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
* read EICS which is a shadow but doesn't clear EICR */
regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
/* Receive DMA */
for (i = 0; i < 2; i++)
regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
for (i = 0; i < 2; i++)
regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
for (i = 0; i < 2; i++)
regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
for (i = 0; i < 2; i++)
regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
for (i = 0; i < 2; i++)
regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
for (i = 0; i < 2; i++)
regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
for (i = 0; i < 2; i++)
regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
/* Receive */
regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
/* Transmit */
for (i = 0; i < 2; i++)
regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
for (i = 0; i < 2; i++)
regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
for (i = 0; i < 2; i++)
regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
for (i = 0; i < 2; i++)
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
for (i = 0; i < 2; i++)
regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
for (i = 0; i < 2; i++)
regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
for (i = 0; i < 2; i++)
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbevf_driver_version,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = adapter->tx_ring;
struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
int i, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring->count) &&
(new_rx_count == adapter->rx_ring->count)) {
/* nothing to do */
return 0;
}
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
/*
* If the adapter isn't up and running then just set the
* new parameters and scurry for the exits.
*/
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct ixgbevf_ring), GFP_KERNEL);
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
}
rx_ring = kcalloc(adapter->num_rx_queues,
sizeof(struct ixgbevf_ring), GFP_KERNEL);
if (!rx_ring) {
err = -ENOMEM;
goto err_rx_setup;
}
ixgbevf_down(adapter);
memcpy(tx_ring, adapter->tx_ring,
adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
tx_ring[i].count = new_tx_count;
err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
if (err) {
while (i) {
i--;
ixgbevf_free_tx_resources(adapter,
&tx_ring[i]);
}
goto err_tx_ring_setup;
}
tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
memcpy(rx_ring, adapter->rx_ring,
adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring[i].count = new_rx_count;
err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
if (err) {
while (i) {
i--;
ixgbevf_free_rx_resources(adapter,
&rx_ring[i]);
}
goto err_rx_ring_setup;
}
rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
/*
* Only switch to new rings if all the prior allocations
* and ring setups have succeeded.
*/
kfree(adapter->tx_ring);
adapter->tx_ring = tx_ring;
adapter->tx_ring_count = new_tx_count;
kfree(adapter->rx_ring);
adapter->rx_ring = rx_ring;
adapter->rx_ring_count = new_rx_count;
/* success! */
ixgbevf_up(adapter);
goto clear_reset;
err_rx_ring_setup:
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
err_tx_ring_setup:
kfree(rx_ring);
err_rx_setup:
kfree(tx_ring);
clear_reset:
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
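/*
* Editor's worked example (hypothetical request, assuming an 8-entry
* descriptor multiple): asking for 1001 RX descriptors is first
* clamped to the [IXGBEVF_MIN_RXD, IXGBEVF_MAX_RXD] range and then
* aligned up to 1008 before the rings are reallocated.
*/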
static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_GLOBAL_STATS_LEN;
default:
return -EINVAL;
}
}
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int i;
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter +
ixgbe_gstrings_stats[i].stat_offset;
char *b = (char *)adapter +
ixgbe_gstrings_stats[i].base_stat_offset;
char *r = (char *)adapter +
ixgbe_gstrings_stats[i].saved_reset_offset;
data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
}
}
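/*
* Editor's note (hypothetical counter values): each stat is reported
* as current - base + saved_reset, e.g. vfgprc = 1500, base_vfgprc =
* 1000 and saved_reset_vfgprc = 200 yield an "rx_packets" of 700.
*/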
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
IXGBE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
memcpy(p, ixgbe_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
}
}
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
if (!link_up)
*data = 1;
return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
u16 reg;
u8 array_len;
u8 test_type;
u32 mask;
u32 write;
};
/* In the hardware, registers are laid out either singly, in arrays
* spaced 0x40 bytes apart, or in contiguous tables. We assume
* most tests take place on arrays or single registers (handled
* as a single-element array) and special-case the tables.
* Table tests are always pattern tests.
*
* We also make provision for some required setup steps by specifying
* registers to be written without any read-back testing.
*/
#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define WRITE_NO_TEST 3
#define TABLE32_TEST 4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
{ 0, 0, 0, 0 }
};
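/*
* Editor's note: each row reads "test array_len copies of reg, spaced
* 0x40 apart, with this mask and write value".  The first row above,
* for instance, pattern-tests IXGBE_VFRDBAL(0) and IXGBE_VFRDBAL(1)
* by writing each pattern masked with 0xFFFFFF80 and comparing the
* read-back value against the same mask.
*/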
static const u32 register_test_patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
writel((register_test_patterns[pat] & W), \
(adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if (val != (register_test_patterns[pat] & W & M)) { \
hw_dbg(&adapter->hw, \
"pattern test reg %04X failed: got " \
"0x%08X expected 0x%08X\n", \
R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
} \
writel(before, adapter->hw.hw_addr + R); \
} \
}
#define REG_SET_AND_CHECK(R, M, W) \
{ \
u32 val, before; \
before = readl(adapter->hw.hw_addr + R); \
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
pr_err("set/check reg %04X test failed: got 0x%08X expected " \
"0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
} \
writel(before, (adapter->hw.hw_addr + R)); \
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
const struct ixgbevf_reg_test *test;
u32 i;
test = reg_test_vf;
/*
* Perform the register test, looping through the test table
* until we either fail or reach the null entry.
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
REG_PATTERN_TEST(test->reg + (i * 0x40),
test->mask,
test->write);
break;
case SET_READ_TEST:
REG_SET_AND_CHECK(test->reg + (i * 0x40),
test->mask,
test->write);
break;
case WRITE_NO_TEST:
writel(test->write,
(adapter->hw.hw_addr + test->reg)
+ (i * 0x40));
break;
case TABLE32_TEST:
REG_PATTERN_TEST(test->reg + (i * 4),
test->mask,
test->write);
break;
case TABLE64_TEST_LO:
REG_PATTERN_TEST(test->reg + (i * 8),
test->mask,
test->write);
break;
case TABLE64_TEST_HI:
REG_PATTERN_TEST((test->reg + 4) + (i * 8),
test->mask,
test->write);
break;
}
}
test++;
}
*data = 0;
return *data;
}
static void ixgbevf_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
set_bit(__IXGBEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
hw_dbg(&adapter->hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
else
ixgbevf_reset(adapter);
hw_dbg(&adapter->hw, "register testing starting\n");
if (ixgbevf_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
ixgbevf_reset(adapter);
clear_bit(__IXGBEVF_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
hw_dbg(&adapter->hw, "online testing starting\n");
/* Online tests */
if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Online tests aren't run; pass by default */
data[0] = 0;
clear_bit(__IXGBEVF_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
}
static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (!adapter->dev_closed)
ixgbevf_reinit_locked(adapter);
}
return 0;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
.nway_reset = ixgbevf_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = ixgbevf_get_ringparam,
.set_ringparam = ixgbevf_set_ringparam,
.get_msglevel = ixgbevf_get_msglevel,
.set_msglevel = ixgbevf_set_msglevel,
.self_test = ixgbevf_diag_test,
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
}
| gpl-2.0 |
PRJosh/android_kernel_samsung_mondrianwifi | arch/arm/mach-pnx4008/irq.c | 4859 | 4001 | /*
* arch/arm/mach-pnx4008/irq.c
*
* PNX4008 IRQ controller driver
*
* Author: Dmitry Chigirev <source@mvista.com>
*
* Based on reference code received from Philips:
* Copyright (C) 2003 Philips Semiconductors
*
* 2005 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <mach/irq.h>
static u8 pnx4008_irq_type[NR_IRQS] = PNX4008_IRQ_TYPES;
static void pnx4008_mask_irq(struct irq_data *d)
{
__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq)); /* mask interrupt */
}
static void pnx4008_unmask_irq(struct irq_data *d)
{
__raw_writel(__raw_readl(INTC_ER(d->irq)) | INTC_BIT(d->irq), INTC_ER(d->irq)); /* unmask interrupt */
}
static void pnx4008_mask_ack_irq(struct irq_data *d)
{
__raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq)); /* mask interrupt */
__raw_writel(INTC_BIT(d->irq), INTC_SR(d->irq)); /* clear interrupt status */
}
static int pnx4008_set_irq_type(struct irq_data *d, unsigned int type)
{
switch (type) {
case IRQ_TYPE_EDGE_RISING:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */
irq_set_handler(d->irq, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */
irq_set_handler(d->irq, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */
irq_set_handler(d->irq, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */
irq_set_handler(d->irq, handle_level_irq);
break;
/* IRQ_TYPE_EDGE_BOTH is not supported */
default:
printk(KERN_ERR "PNX4008 IRQ: Unsupported irq type %d\n", type);
return -1;
}
return 0;
}
static struct irq_chip pnx4008_irq_chip = {
.irq_ack = pnx4008_mask_ack_irq,
.irq_mask = pnx4008_mask_irq,
.irq_unmask = pnx4008_unmask_irq,
.irq_set_type = pnx4008_set_irq_type,
};
void __init pnx4008_init_irq(void)
{
unsigned int i;
/* configure IRQ's */
for (i = 0; i < NR_IRQS; i++) {
set_irq_flags(i, IRQF_VALID);
irq_set_chip(i, &pnx4008_irq_chip);
pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]);
}
/* configure and enable IRQ 0,1,30,31 (cascade interrupts) */
pnx4008_set_irq_type(irq_get_irq_data(SUB1_IRQ_N),
pnx4008_irq_type[SUB1_IRQ_N]);
pnx4008_set_irq_type(irq_get_irq_data(SUB2_IRQ_N),
pnx4008_irq_type[SUB2_IRQ_N]);
pnx4008_set_irq_type(irq_get_irq_data(SUB1_FIQ_N),
pnx4008_irq_type[SUB1_FIQ_N]);
pnx4008_set_irq_type(irq_get_irq_data(SUB2_FIQ_N),
pnx4008_irq_type[SUB2_FIQ_N]);
/* mask all others */
__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
(1 << SUB2_IRQ_N) | (1 << SUB1_IRQ_N),
INTC_ER(MAIN_BASE_INT));
__raw_writel(0, INTC_ER(SIC1_BASE_INT));
__raw_writel(0, INTC_ER(SIC2_BASE_INT));
}
| gpl-2.0 |
ShadySquirrel/e980-zeKrnl | net/dccp/output.c | 4859 | 20660 | /*
* net/dccp/output.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
static inline void dccp_event_ack_sent(struct sock *sk)
{
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
skb_set_owner_w(skb, sk);
WARN_ON(sk->sk_send_head);
sk->sk_send_head = skb;
return skb_clone(sk->sk_send_head, gfp_any());
}
/*
* All SKB's seen here are completely headerless. It is our
* job to build the DCCP header, and pass the packet down to
* IP so it can do the same plus pass the packet off to the
* device.
*/
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
if (likely(skb != NULL)) {
struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
struct dccp_hdr *dh;
/* XXX For now we're using only 48 bits sequence numbers */
const u32 dccp_header_size = sizeof(*dh) +
sizeof(struct dccp_hdr_ext) +
dccp_packet_hdr_len(dcb->dccpd_type);
int err, set_ack = 1;
u64 ackno = dp->dccps_gsr;
/*
* Increment GSS here already in case the option code needs it.
* Update GSS for real only if option processing below succeeds.
*/
dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);
switch (dcb->dccpd_type) {
case DCCP_PKT_DATA:
set_ack = 0;
/* fall through */
case DCCP_PKT_DATAACK:
case DCCP_PKT_RESET:
break;
case DCCP_PKT_REQUEST:
set_ack = 0;
/* Use ISS on the first (non-retransmitted) Request. */
if (icsk->icsk_retransmits == 0)
dcb->dccpd_seq = dp->dccps_iss;
/* fall through */
case DCCP_PKT_SYNC:
case DCCP_PKT_SYNCACK:
ackno = dcb->dccpd_ack_seq;
/* fall through */
default:
/*
* Set owner/destructor: some skbs are allocated via
* alloc_skb (e.g. when retransmission may happen).
* Only Data, DataAck, and Reset packets should come
* through here with skb->sk set.
*/
WARN_ON(skb->sk);
skb_set_owner_w(skb, sk);
break;
}
if (dccp_insert_options(sk, skb)) {
kfree_skb(skb);
return -EPROTO;
}
/* Build DCCP header and checksum it. */
dh = dccp_zeroed_hdr(skb, dccp_header_size);
dh->dccph_type = dcb->dccpd_type;
dh->dccph_sport = inet->inet_sport;
dh->dccph_dport = inet->inet_dport;
dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
dh->dccph_ccval = dcb->dccpd_ccval;
dh->dccph_cscov = dp->dccps_pcslen;
/* XXX For now we're using only 48 bits sequence numbers */
dh->dccph_x = 1;
dccp_update_gss(sk, dcb->dccpd_seq);
dccp_hdr_set_seq(dh, dp->dccps_gss);
if (set_ack)
dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
switch (dcb->dccpd_type) {
case DCCP_PKT_REQUEST:
dccp_hdr_request(skb)->dccph_req_service =
dp->dccps_service;
/*
* Limit Ack window to ISS <= P.ackno <= GSS, so that
* only Responses to Requests we sent are considered.
*/
dp->dccps_awl = dp->dccps_iss;
break;
case DCCP_PKT_RESET:
dccp_hdr_reset(skb)->dccph_reset_code =
dcb->dccpd_reset_code;
break;
}
icsk->icsk_af_ops->send_check(sk, skb);
if (set_ack)
dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
return net_xmit_eval(err);
}
return -ENOBUFS;
}
/**
* dccp_determine_ccmps - Find out about CCID-specific packet-size limits
* We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
* since the RX CCID is restricted to feedback packets (Acks), which are small
* in comparison with the data traffic. A value of 0 means "no current CCMPS".
*/
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;
if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
return 0;
return tx_ccid->ccid_ops->ccid_ccmps;
}
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct dccp_sock *dp = dccp_sk(sk);
u32 ccmps = dccp_determine_ccmps(dp);
u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;
/* Account for header lengths and IPv4/v6 option overhead */
cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));
/*
* Leave enough headroom for common DCCP header options.
* This only considers options which may appear on DCCP-Data packets, as
* per table 3 in RFC 4340, 5.8. When running out of space for other
* options (e.g. Ack Vector, which can take up to 255 bytes), it is better
* to schedule a separate Ack. Thus we leave headroom for the following:
* - 1 byte for Slow Receiver (11.6)
* - 6 bytes for Timestamp (13.1)
* - 10 bytes for Timestamp Echo (13.3)
* - 8 bytes for NDP count (7.7, when activated)
* - 6 bytes for Data Checksum (9.3)
* - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
*/
cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
(dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
/* And store cached results */
icsk->icsk_pmtu_cookie = pmtu;
dp->dccps_mss_cache = cur_mps;
return cur_mps;
}
EXPORT_SYMBOL_GPL(dccp_sync_mss);
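/*
 * Rough worked example of the calculation above (all numbers assumed, not
 * taken from a live socket): on a plain IPv4 path with a 1500-byte PMTU and
 * no CCMPS limit, the header deduction is 20 (IP) + 12 + 4 (generic DCCP
 * header plus the 48-bit sequence-number extension), giving 1464. With the
 * NDP count feature off and no Ack Vector, the option headroom is
 * roundup(1 + 6 + 10 + 6, 4) = 24, so dccps_mss_cache ends up at 1440.
 */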
void dccp_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
/**
* dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for
* @delay: timeout in jiffies
* This is used by CCIDs which need to delay the send time in process context.
*/
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
DEFINE_WAIT(wait);
long remaining;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_write_pending++;
release_sock(sk);
remaining = schedule_timeout(delay);
lock_sock(sk);
sk->sk_write_pending--;
finish_wait(sk_sleep(sk), &wait);
if (signal_pending(current) || sk->sk_err)
return -1;
return remaining;
}
/**
* dccp_xmit_packet - Send data packet under control of CCID
* Transmits next-queued payload and informs CCID to account for the packet.
*/
static void dccp_xmit_packet(struct sock *sk)
{
int err, len;
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb = dccp_qpolicy_pop(sk);
if (unlikely(skb == NULL))
return;
len = skb->len;
if (sk->sk_state == DCCP_PARTOPEN) {
const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
/*
* See 8.1.5 - Handshake Completion.
*
* For robustness we resend Confirm options until the client has
* entered OPEN. During the initial feature negotiation, the MPS
* is smaller than usual, reduced by the Change/Confirm options.
*/
if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
DCCP_WARN("Payload too large (%d) for featneg.\n", len);
dccp_send_ack(sk);
dccp_feat_list_purge(&dp->dccps_featneg);
}
inet_csk_schedule_ack(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
inet_csk(sk)->icsk_rto,
DCCP_RTO_MAX);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
} else if (dccp_ack_pending(sk)) {
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
} else {
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
}
err = dccp_transmit_skb(sk, skb);
if (err)
dccp_pr_debug("transmit_skb() returned err=%d\n", err);
/*
* Register this one as sent even if an error occurred. To the remote
* end a local packet drop is indistinguishable from network loss, i.e.
* any local drop will eventually be reported via receiver feedback.
*/
ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
/*
* If the CCID needs to transfer additional header options out-of-band
* (e.g. Ack Vectors or feature-negotiation options), it activates this
* flag to schedule a Sync. The Sync will automatically incorporate all
* currently pending header options, thus clearing the backlog.
*/
if (dp->dccps_sync_scheduled)
dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}
/**
* dccp_flush_write_queue - Drain queue at end of connection
* Since dccp_sendmsg queues packets without waiting for them to be sent, it may
* happen that the TX queue is not empty at the end of a connection. We give the
* HC-sender CCID a grace period of up to @time_budget jiffies. If this function
* returns with a non-empty write queue, it will be purged later.
*/
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
long delay, rc;
while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
switch (ccid_packet_dequeue_eval(rc)) {
case CCID_PACKET_WILL_DEQUEUE_LATER:
/*
* If the CCID determines when to send, the next sending
* time is unknown or the CCID may not even send again
* (e.g. remote host crashes or lost Ack packets).
*/
DCCP_WARN("CCID did not manage to send all packets\n");
return;
case CCID_PACKET_DELAY:
delay = msecs_to_jiffies(rc);
if (delay > *time_budget)
return;
rc = dccp_wait_for_ccid(sk, delay);
if (rc < 0)
return;
*time_budget -= (delay - rc);
/* check again if we can send now */
break;
case CCID_PACKET_SEND_AT_ONCE:
dccp_xmit_packet(sk);
break;
case CCID_PACKET_ERR:
skb_dequeue(&sk->sk_write_queue);
kfree_skb(skb);
dccp_pr_debug("packet discarded due to err=%ld\n", rc);
}
}
}
void dccp_write_xmit(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
while ((skb = dccp_qpolicy_top(sk))) {
int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
switch (ccid_packet_dequeue_eval(rc)) {
case CCID_PACKET_WILL_DEQUEUE_LATER:
return;
case CCID_PACKET_DELAY:
sk_reset_timer(sk, &dp->dccps_xmit_timer,
jiffies + msecs_to_jiffies(rc));
return;
case CCID_PACKET_SEND_AT_ONCE:
dccp_xmit_packet(sk);
break;
case CCID_PACKET_ERR:
dccp_qpolicy_drop(sk, skb);
dccp_pr_debug("packet discarded due to err=%d\n", rc);
}
}
}
/**
* dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
* There are only four retransmittable packet types in DCCP:
* - Request in client-REQUEST state (sec. 8.1.1),
* - CloseReq in server-CLOSEREQ state (sec. 8.3),
* - Close in node-CLOSING state (sec. 8.3),
* - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
* This function expects sk->sk_send_head to contain the original skb.
*/
int dccp_retransmit_skb(struct sock *sk)
{
WARN_ON(sk->sk_send_head == NULL);
if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
return -EHOSTUNREACH; /* Routing failure or similar. */
/* this count is used to distinguish original and retransmitted skb */
inet_csk(sk)->icsk_retransmits++;
return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
struct request_sock *req)
{
struct dccp_hdr *dh;
struct dccp_request_sock *dreq;
const u32 dccp_header_size = sizeof(struct dccp_hdr) +
sizeof(struct dccp_hdr_ext) +
sizeof(struct dccp_hdr_response);
struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
GFP_ATOMIC);
if (skb == NULL)
return NULL;
/* Reserve space for headers. */
skb_reserve(skb, sk->sk_prot->max_header);
skb_dst_set(skb, dst_clone(dst));
dreq = dccp_rsk(req);
if (inet_rsk(req)->acked) /* increase GSS upon retransmission */
dccp_inc_seqno(&dreq->dreq_gss);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;
/* Resolve feature dependencies resulting from choice of CCID */
if (dccp_feat_server_ccid_dependencies(dreq))
goto response_failed;
if (dccp_insert_options_rsk(dreq, skb))
goto response_failed;
/* Build and checksum header */
dh = dccp_zeroed_hdr(skb, dccp_header_size);
dh->dccph_sport = inet_rsk(req)->loc_port;
dh->dccph_dport = inet_rsk(req)->rmt_port;
dh->dccph_doff = (dccp_header_size +
DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
dh->dccph_type = DCCP_PKT_RESPONSE;
dh->dccph_x = 1;
dccp_hdr_set_seq(dh, dreq->dreq_gss);
dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
dccp_csum_outgoing(skb);
/* We use `acked' to remember that a Response was already sent. */
inet_rsk(req)->acked = 1;
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
return skb;
response_failed:
kfree_skb(skb);
return NULL;
}
EXPORT_SYMBOL_GPL(dccp_make_response);
/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
sizeof(struct dccp_hdr_ext) +
sizeof(struct dccp_hdr_reset);
struct dccp_hdr_reset *dhr;
struct sk_buff *skb;
skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
if (skb == NULL)
return NULL;
skb_reserve(skb, sk->sk_prot->max_header);
/* Swap the send and the receive. */
dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
dh->dccph_type = DCCP_PKT_RESET;
dh->dccph_sport = rxdh->dccph_dport;
dh->dccph_dport = rxdh->dccph_sport;
dh->dccph_doff = dccp_hdr_reset_len / 4;
dh->dccph_x = 1;
dhr = dccp_hdr_reset(skb);
dhr->dccph_reset_code = dcb->dccpd_reset_code;
switch (dcb->dccpd_reset_code) {
case DCCP_RESET_CODE_PACKET_ERROR:
dhr->dccph_reset_data[0] = rxdh->dccph_type;
break;
case DCCP_RESET_CODE_OPTION_ERROR: /* fall through */
case DCCP_RESET_CODE_MANDATORY_ERROR:
memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
break;
}
/*
* From RFC 4340, 8.3.1:
* If P.ackno exists, set R.seqno := P.ackno + 1.
* Else set R.seqno := 0.
*/
if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);
dccp_csum_outgoing(skb);
return skb;
}
EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);
/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
struct sk_buff *skb;
/*
* FIXME: what if rebuild_header fails?
* Should we be doing a rebuild_header here?
*/
int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);
if (err != 0)
return err;
skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
if (skb == NULL)
return -ENOBUFS;
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
DCCP_SKB_CB(skb)->dccpd_reset_code = code;
return dccp_transmit_skb(sk, skb);
}
/*
* Do all connect socket setups that can be done AF independent.
*/
int dccp_connect(struct sock *sk)
{
struct sk_buff *skb;
struct dccp_sock *dp = dccp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
dccp_sync_mss(sk, dst_mtu(dst));
/* do not connect if feature negotiation setup fails */
if (dccp_feat_finalise_settings(dccp_sk(sk)))
return -EPROTO;
/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
dp->dccps_gar = dp->dccps_iss;
skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
if (unlikely(skb == NULL))
return -ENOBUFS;
/* Reserve space for headers. */
skb_reserve(skb, sk->sk_prot->max_header);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);
/* Timer for repeating the REQUEST until an answer. */
icsk->icsk_retransmits = 0;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
icsk->icsk_rto, DCCP_RTO_MAX);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_connect);
void dccp_send_ack(struct sock *sk)
{
/* If we have been reset, we may not send again. */
if (sk->sk_state != DCCP_CLOSED) {
struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
GFP_ATOMIC);
if (skb == NULL) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX,
DCCP_RTO_MAX);
return;
}
/* Reserve space for headers */
skb_reserve(skb, sk->sk_prot->max_header);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
dccp_transmit_skb(sk, skb);
}
}
EXPORT_SYMBOL_GPL(dccp_send_ack);
#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
/*
* FIXME: tune this timer. elapsed time fixes the skew, so no problem
* with using 2s, and active senders also piggyback the ACK into a
* DATAACK packet, so this is really for quiescent senders.
*/
unsigned long timeout = jiffies + 2 * HZ;
/* Use the new timeout only if there wasn't an older one already set. */
if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
/* If delack timer was blocked or is about to expire,
* send ACK now.
*
* FIXME: check the "about to expire" part
*/
if (icsk->icsk_ack.blocked) {
dccp_send_ack(sk);
return;
}
if (!time_before(timeout, icsk->icsk_ack.timeout))
timeout = icsk->icsk_ack.timeout;
}
icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
icsk->icsk_ack.timeout = timeout;
sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif
void dccp_send_sync(struct sock *sk, const u64 ackno,
const enum dccp_pkt_type pkt_type)
{
/*
* We are not putting this on the write queue, so
* dccp_transmit_skb() will set the ownership to this
* sock.
*/
struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
if (skb == NULL) {
/* FIXME: how to make sure the sync is sent? */
DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
return;
}
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
/*
* Clear the flag in case the Sync was scheduled for out-of-band data,
* such as carrying a long Ack Vector.
*/
dccp_sk(sk)->dccps_sync_scheduled = 0;
dccp_transmit_skb(sk, skb);
}
EXPORT_SYMBOL_GPL(dccp_send_sync);
/*
* Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
* cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
* any circumstances.
*/
void dccp_send_close(struct sock *sk, const int active)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
skb = alloc_skb(sk->sk_prot->max_header, prio);
if (skb == NULL)
return;
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
else
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
if (active) {
skb = dccp_skb_entail(sk, skb);
/*
* Retransmission timer for active-close: RFC 4340, 8.3 requires
* to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
* state can be left. The initial timeout is 2 RTTs.
* Since RTT measurement is done by the CCIDs, there is no easy
* way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
* is too low (200ms); we use a high value to avoid unnecessary
* retransmissions when the link RTT is > 0.2 seconds.
* FIXME: Let main module sample RTTs and use that instead.
*/
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
}
dccp_transmit_skb(sk, skb);
}
| gpl-2.0 |
invisiblek/android_kernel_htc_msm8960 | drivers/ata/pata_atiixp.c | 5115 | 8525 | /*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
* (C) 2009-2010 Bartlomiej Zolnierkiewicz
*
* Based on
*
* linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
*
* Copyright (C) 2003 ATI Inc. <hyu@ati.com>
* Copyright (C) 2004 Bartlomiej Zolnierkiewicz
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"
enum {
ATIIXP_IDE_PIO_TIMING = 0x40,
ATIIXP_IDE_MWDMA_TIMING = 0x44,
ATIIXP_IDE_PIO_CONTROL = 0x48,
ATIIXP_IDE_PIO_MODE = 0x4a,
ATIIXP_IDE_UDMA_CONTROL = 0x54,
ATIIXP_IDE_UDMA_MODE = 0x56
};
static int atiixp_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udma;
/* Hack from drivers/ide/pci. Really we want to know how to do the
raw cable detection, not play follow-the-BIOS mode guessing. */
pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
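/*
 * Worked example of the check above (value invented for illustration): the
 * mode byte holds one nibble per drive, so udma == 0x52 would mean the BIOS
 * left drive 0 at UDMA2 (0x02 < 0x04) but drive 1 at UDMA5 (0x50 >= 0x40);
 * since UDMA4 and above require the 80-wire cable, the function reports
 * ATA_CBL_PATA80.
 */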
static DEFINE_SPINLOCK(atiixp_lock);
/**
* atiixp_prereset - perform reset handling
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Reset sequence checking enable bits to see which ports are
* active.
*/
static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits atiixp_enable_bits[] = {
{ 0x48, 1, 0x01, 0x00 },
{ 0x48, 1, 0x08, 0x00 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called by both the pio and dma setup functions to set the controller
* timings for PIO transfers. We must load both the mode number and
* timing values into the controller.
*/
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 pio_timing_data;
u16 pio_mode_data;
pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
pio_mode_data &= ~(0x7 << (4 * dn));
pio_mode_data |= pio << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
pio_timing_data &= ~(0xFF << timing_shift);
pio_timing_data |= (pio_timings[pio] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
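/*
 * For concreteness, the shift arithmetic above (derived from the formula
 * itself, not from a datasheet) places the PIO timing bytes as follows:
 * primary master (dn 0) in bits 15:8, primary slave (dn 1) in bits 7:0,
 * secondary master (dn 2) in bits 31:24, secondary slave (dn 3) in bits
 * 23:16 - within each port the slave byte sits below the master byte
 * because of the (devno ^ 1) term.
 */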
/**
* atiixp_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. We use a shared helper for this
* as the DMA setup must also adjust the PIO timing information.
*/
static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup. We use timing tables for most
* modes but must tune an appropriate PIO mode to match.
*/
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
int dn = 2 * ap->port_no + adev->devno;
int wanted_pio;
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
if (adev->dma_mode >= XFER_UDMA_0) {
u16 udma_mode_data;
dma -= XFER_UDMA_0;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
udma_mode_data &= ~(0x7 << (4 * dn));
udma_mode_data |= dma << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
} else {
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 mwdma_timing_data;
dma -= XFER_MW_DMA_0;
pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
&mwdma_timing_data);
mwdma_timing_data &= ~(0xFF << timing_shift);
mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
mwdma_timing_data);
}
/*
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
if (adev->dma_mode >= XFER_MW_DMA_2)
wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
wanted_pio = 0;
else BUG();
if (adev->pio_mode != wanted_pio)
atiixp_set_pio_timing(ap, adev, wanted_pio);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_bmdma_start - DMA start callback
* @qc: Command in progress
*
* When DMA begins we need to ensure that the UDMA control
* register for the channel is correctly set.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + adev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
if (ata_using_udma(adev))
tmp16 |= (1 << dn);
else
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_start(qc);
}
/**
* atiixp_dma_stop - DMA stop callback
* @qc: Command in progress
*
* DMA has completed. Clear the UDMA flag as the next operations will
* be PIO ones not UDMA data transfer.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + qc->dev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_stop(qc);
}
static struct scsi_host_template atiixp_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
};
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
};
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &atiixp_port_ops
};
const struct ata_port_info *ppi[] = { &info, &info };
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
{ },
};
static struct pci_driver atiixp_pci_driver = {
.name = DRV_NAME,
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.resume = ata_pci_device_resume,
.suspend = ata_pci_device_suspend,
#endif
};
static int __init atiixp_init(void)
{
return pci_register_driver(&atiixp_pci_driver);
}
static void __exit atiixp_exit(void)
{
pci_unregister_driver(&atiixp_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);
module_init(atiixp_init);
module_exit(atiixp_exit);
| gpl-2.0 |
viasyllable/android_kernel_lge_p970 | drivers/misc/ioc4.c | 7931 | 14731 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
/* This file contains the master driver module for use by SGI IOC4 subdrivers.
*
* It allocates any resources shared between multiple subdevices, and
* provides accessor functions (where needed) and the like for those
* resources. It also provides a mechanism for the subdevice modules
* to support loading and unloading.
*
* Non-shared resources (e.g. external interrupt A_INT_OUT register page
* alias, serial port and UART registers) are handled by the subdevice
* modules themselves.
*
* This is all necessary because IOC4 is not implemented as a multi-function
* PCI device, but an amalgamation of disparate registers for several
* types of device (ATA, serial, external interrupts). The normal
* resource management in the kernel doesn't have quite the right interfaces
* to handle this situation (e.g. multiple modules can't claim the same
* PCI ID), thus this IOC4 master module.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioc4.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <asm/io.h>
/***************
* Definitions *
***************/
/* Tweakable values */
/* PCI bus speed detection/calibration */
#define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */
#define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */
#define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */
#define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */
#define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */
#define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */
/************************
* Submodule management *
************************/
static DEFINE_MUTEX(ioc4_mutex);
static LIST_HEAD(ioc4_devices);
static LIST_HEAD(ioc4_submodules);
/* Register an IOC4 submodule */
int
ioc4_register_submodule(struct ioc4_submodule *is)
{
struct ioc4_driver_data *idd;
mutex_lock(&ioc4_mutex);
list_add(&is->is_list, &ioc4_submodules);
/* Initialize submodule for each IOC4 */
if (!is->is_probe)
goto out;
list_for_each_entry(idd, &ioc4_devices, idd_list) {
if (is->is_probe(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s probe failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
out:
mutex_unlock(&ioc4_mutex);
return 0;
}
/* Unregister an IOC4 submodule */
void
ioc4_unregister_submodule(struct ioc4_submodule *is)
{
struct ioc4_driver_data *idd;
mutex_lock(&ioc4_mutex);
list_del(&is->is_list);
/* Remove submodule for each IOC4 */
if (!is->is_remove)
goto out;
list_for_each_entry(idd, &ioc4_devices, idd_list) {
if (is->is_remove(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s remove failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
out:
mutex_unlock(&ioc4_mutex);
}
/*********************
* Device management *
*********************/
#define IOC4_CALIBRATE_LOW_LIMIT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
#define IOC4_CALIBRATE_HIGH_LIMIT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
#define IOC4_CALIBRATE_DEFAULT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
#define IOC4_CALIBRATE_END \
(IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
#define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */
/* Determines external interrupt output clock period of the PCI bus an
* IOC4 is attached to. This value can be used to determine the PCI
* bus speed.
*
* IOC4 has a design feature that various internal timers are derived from
* the PCI bus clock. This causes IOC4 device drivers to need to take the
* bus speed into account when setting various register values (e.g. INT_OUT
* register COUNT field, UART divisors, etc). Since this information is
* needed by several subdrivers, it is determined by the main IOC4 driver,
* even though the following code utilizes external interrupt registers
* to perform the speed calculation.
*/
static void __devinit
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
union ioc4_gpcr gpcr;
unsigned int state, last_state = 1;
struct timespec start_ts, end_ts;
uint64_t start, end, period;
unsigned int count = 0;
/* Enable output */
gpcr.raw = 0;
gpcr.fields.dir = IOC4_GPCR_DIR_0;
gpcr.fields.int_out_en = 1;
writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
/* Reset to power-on state */
writel(0, &idd->idd_misc_regs->int_out.raw);
mmiowb();
/* Set up square wave */
int_out.raw = 0;
int_out.fields.count = IOC4_CALIBRATE_COUNT;
int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
int_out.fields.diag = 0;
writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
mmiowb();
/* Check square wave period averaged over some number of cycles */
do {
int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
state = int_out.fields.int_out;
if (!last_state && state) {
count++;
if (count == IOC4_CALIBRATE_END) {
ktime_get_ts(&end_ts);
break;
} else if (count == IOC4_CALIBRATE_DISCARD)
ktime_get_ts(&start_ts);
}
last_state = state;
} while (1);
/* Calculation rearranged to preserve intermediate precision.
* Logically:
* 1. "end - start" gives us the measurement period over all
* the square wave cycles.
* 2. Divide by number of square wave cycles to get the period
* of a square wave cycle.
* 3. Divide by 2*(int_out.fields.count+1), which is the formula
* by which the IOC4 generates the square wave, to get the
* period of an IOC4 INT_OUT count.
*/
end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
period = (end - start) /
(IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
/* Bounds check the result. */
if (period > IOC4_CALIBRATE_LOW_LIMIT ||
period < IOC4_CALIBRATE_HIGH_LIMIT) {
printk(KERN_INFO
"IOC4 %s: Clock calibration failed. Assuming "
"PCI clock is %d ns.\n",
pci_name(idd->idd_pdev),
IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
period = IOC4_CALIBRATE_DEFAULT;
} else {
u64 ns = period;
do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
printk(KERN_DEBUG
"IOC4 %s: PCI clock is %llu ns.\n",
pci_name(idd->idd_pdev), (unsigned long long)ns);
}
/* Remember results. We store the extint clock period rather
* than the PCI clock period so that greater precision is
* retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get
* PCI clock period.
*/
idd->count_period = period;
}
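/*
 * Note on the bounds check above: period is the length of one INT_OUT count
 * in nanoseconds, so it shrinks as the bus gets faster. That is why
 * IOC4_CALIBRATE_LOW_LIMIT (derived from the 25 MHz lower sanity bound) is
 * the numerically larger value and IOC4_CALIBRATE_HIGH_LIMIT (75 MHz) the
 * smaller one. A nominal 66 MHz bus, for example, should land near
 * IOC4_CALIBRATE_DEFAULT, comfortably inside the accepted window.
 */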
/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
* Each brings out different combinations of IOC4 signals, thus
* the IOC4 subdrivers need to know which variant we're attached to.
*
* We look for the presence of a SCSI (IO9) or SATA (IO10) controller
* on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
* If neither is present, it's a PCI-RT.
*/
static unsigned int __devinit
ioc4_variant(struct ioc4_driver_data *idd)
{
struct pci_dev *pdev = NULL;
int found = 0;
/* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
do {
pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
if (pdev &&
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
} while (pdev && !found);
if (NULL != pdev) {
pci_dev_put(pdev);
return IOC4_VARIANT_IO9;
}
/* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
pdev = NULL;
do {
pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
if (pdev &&
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
} while (pdev && !found);
if (NULL != pdev) {
pci_dev_put(pdev);
return IOC4_VARIANT_IO10;
}
/* PCI-RT: No SCSI/SATA controller will be present */
return IOC4_VARIANT_PCI_RT;
}
static void
ioc4_load_modules(struct work_struct *work)
{
request_module("sgiioc4");
}
static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
/* Adds a new instance of an IOC4 card */
static int __devinit
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc4_driver_data *idd;
struct ioc4_submodule *is;
uint32_t pcmd;
int ret;
/* Enable IOC4 and take ownership of it */
if ((ret = pci_enable_device(pdev))) {
printk(KERN_WARNING
"%s: Failed to enable IOC4 device for pci_dev %s.\n",
__func__, pci_name(pdev));
goto out;
}
pci_set_master(pdev);
/* Set up per-IOC4 data */
idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
if (!idd) {
printk(KERN_WARNING
"%s: Failed to allocate IOC4 data for pci_dev %s.\n",
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_idd;
}
idd->idd_pdev = pdev;
idd->idd_pci_id = pci_id;
/* Map IOC4 misc registers. These are shared between subdevices
* so the main IOC4 module manages them.
*/
idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
if (!idd->idd_bar0) {
printk(KERN_WARNING
"%s: Unable to find IOC4 misc resource "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_pci;
}
if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
"ioc4_misc")) {
printk(KERN_WARNING
"%s: Unable to request IOC4 misc region "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_pci;
}
idd->idd_misc_regs = ioremap(idd->idd_bar0,
sizeof(struct ioc4_misc_regs));
if (!idd->idd_misc_regs) {
printk(KERN_WARNING
"%s: Unable to remap IOC4 misc region "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_misc_region;
}
/* Failsafe portion of per-IOC4 initialization */
/* Detect card variant */
idd->idd_variant = ioc4_variant(idd);
printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
/* Initialize IOC4 */
pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
/* Determine PCI clock */
ioc4_clock_calibrate(idd);
/* Disable/clear all interrupts. Need to do this here lest
* one submodule request the shared IOC4 IRQ, but interrupt
* is generated by a different subdevice.
*/
/* Disable */
writel(~0, &idd->idd_misc_regs->other_iec.raw);
writel(~0, &idd->idd_misc_regs->sio_iec);
/* Clear (i.e. acknowledge) */
writel(~0, &idd->idd_misc_regs->other_ir.raw);
writel(~0, &idd->idd_misc_regs->sio_ir);
/* Track PCI-device specific data */
idd->idd_serial_data = NULL;
pci_set_drvdata(idd->idd_pdev, idd);
mutex_lock(&ioc4_mutex);
list_add_tail(&idd->idd_list, &ioc4_devices);
/* Add this IOC4 to all submodules */
list_for_each_entry(is, &ioc4_submodules, is_list) {
if (is->is_probe && is->is_probe(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s probe failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
mutex_unlock(&ioc4_mutex);
/* Request sgiioc4 IDE driver on boards that bring that functionality
* off of IOC4. The root filesystem may be hosted on a drive connected
* to IOC4, so we need to make sure the sgiioc4 driver is loaded as it
* won't be picked up by modprobes due to the ioc4 module owning the
* PCI device.
*/
if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
/* Request the module from a work procedure as the modprobe
* goes out to a userland helper and that will hang if done
* directly from ioc4_probe().
*/
printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
schedule_work(&ioc4_load_modules_work);
}
return 0;
out_misc_region:
release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
out_pci:
kfree(idd);
out_idd:
pci_disable_device(pdev);
out:
return ret;
}
/* Removes a particular instance of an IOC4 card. */
static void __devexit
ioc4_remove(struct pci_dev *pdev)
{
struct ioc4_submodule *is;
struct ioc4_driver_data *idd;
idd = pci_get_drvdata(pdev);
/* Remove this IOC4 from all submodules */
mutex_lock(&ioc4_mutex);
list_for_each_entry(is, &ioc4_submodules, is_list) {
if (is->is_remove && is->is_remove(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s remove failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
mutex_unlock(&ioc4_mutex);
/* Release resources */
iounmap(idd->idd_misc_regs);
if (!idd->idd_bar0) {
printk(KERN_WARNING
"%s: Unable to get IOC4 misc mapping for pci_dev %s. "
"Device removal may be incomplete.\n",
__func__, pci_name(idd->idd_pdev));
}
release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
/* Disable IOC4 and relinquish */
pci_disable_device(pdev);
/* Remove and free driver data */
mutex_lock(&ioc4_mutex);
list_del(&idd->idd_list);
mutex_unlock(&ioc4_mutex);
kfree(idd);
}
static struct pci_device_id ioc4_id_table[] = {
{PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
{0}
};
static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
.remove = __devexit_p(ioc4_remove),
};
MODULE_DEVICE_TABLE(pci, ioc4_id_table);
/*********************
* Module management *
*********************/
/* Module load */
static int __init
ioc4_init(void)
{
return pci_register_driver(&ioc4_driver);
}
/* Module unload */
static void __exit
ioc4_exit(void)
{
/* Ensure ioc4_load_modules() has completed before exiting */
flush_work_sync(&ioc4_load_modules_work);
pci_unregister_driver(&ioc4_driver);
}
module_init(ioc4_init);
module_exit(ioc4_exit);
MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ioc4_register_submodule);
EXPORT_SYMBOL(ioc4_unregister_submodule);
| gpl-2.0 |
Ekylypse/android_kernel_kltesprsport | drivers/mtd/maps/rpxlite.c | 8187 | 1371 | /*
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#define WINDOW_ADDR 0xfe000000
#define WINDOW_SIZE 0x800000
static struct mtd_info *mymtd;
static struct map_info rpxlite_map = {
.name = "RPX",
.size = WINDOW_SIZE,
.bankwidth = 4,
.phys = WINDOW_ADDR,
};
static int __init init_rpxlite(void)
{
printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
if (!rpxlite_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
simple_map_init(&rpxlite_map);
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, NULL, 0);
return 0;
}
iounmap((void *)rpxlite_map.virt);
return -ENXIO;
}
static void __exit cleanup_rpxlite(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (rpxlite_map.virt) {
iounmap((void *)rpxlite_map.virt);
rpxlite_map.virt = 0;
}
}
module_init(init_rpxlite);
module_exit(cleanup_rpxlite);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
| gpl-2.0 |
artefvck/X_Artefvck | mm/failslab.c | 8955 | 1316 | #include <linux/fault-inject.h>
#include <linux/slab.h>
static struct {
struct fault_attr attr;
u32 ignore_gfp_wait;
int cache_filter;
} failslab = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_gfp_wait = 1,
.cache_filter = 0,
};
bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
{
if (gfpflags & __GFP_NOFAIL)
return false;
if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
return false;
if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
return false;
return should_fail(&failslab.attr, size);
}
static int __init setup_failslab(char *str)
{
return setup_fault_attr(&failslab.attr, str);
}
__setup("failslab=", setup_failslab);
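/*
 * Example usage (values picked arbitrarily): booting with
 *
 * failslab=16,100,0,-1
 *
 * is expected to be parsed by setup_fault_attr() as
 * <interval>,<probability>,<space>,<times>, i.e. consider only every 16th
 * eligible slab allocation and then fail it with 100% probability, with no
 * cap on the number of injected failures. When
 * CONFIG_FAULT_INJECTION_DEBUG_FS is enabled, the same parameters plus the
 * ignore-gfp-wait and cache-filter switches created below can also be tuned
 * at runtime under <debugfs>/failslab/.
 */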
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init failslab_debugfs_init(void)
{
struct dentry *dir;
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
&failslab.ignore_gfp_wait))
goto fail;
if (!debugfs_create_bool("cache-filter", mode, dir,
&failslab.cache_filter))
goto fail;
return 0;
fail:
debugfs_remove_recursive(dir);
return -ENOMEM;
}
late_initcall(failslab_debugfs_init);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
| gpl-2.0 |
bigzz/linux-btrfs | drivers/scsi/libfc/fc_elsct.c | 9467 | 3696 | /*
* Copyright(c) 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Provide interface to send ELS/CT FC frames
*/
#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
* @lport: The local port to send the frame on
* @did: The destination ID for the frame
* @fp: The frame to be sent
* @op: The operational code
* @resp: The callback routine when the response is received
* @arg: The argument to pass to the response callback routine
* @timer_msec: The timeout period for the frame (in msecs)
*/
struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *fp, unsigned int op,
void (*resp)(struct fc_seq *,
struct fc_frame *,
void *),
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
enum fc_fh_type fh_type;
int rc;
/* ELS requests */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
else {
/* CT requests */
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
}
if (rc) {
fc_frame_free(fp);
return NULL;
}
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
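/*
 * Typical call pattern (sketch only; the response handler name is
 * illustrative): a login or discovery path allocates a frame and hands it
 * off through the transport template, e.g.
 *
 * fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 * if (fp)
 *         lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
 *                              my_flogi_resp, lport, lport->e_d_tov);
 *
 * fc_elsct_send() then fills in either the ELS or the CT payload depending
 * on the opcode range checked above.
 */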
/**
* fc_elsct_init() - Initialize the ELS/CT layer
* @lport: The local port to initialize the ELS/CT layer for
*/
int fc_elsct_init(struct fc_lport *lport)
{
if (!lport->tt.elsct_send)
lport->tt.elsct_send = fc_elsct_send;
return 0;
}
EXPORT_SYMBOL(fc_elsct_init);
/**
* fc_els_resp_type() - Return a string describing the ELS response
* @fp: The frame pointer or possible error code
*/
const char *fc_els_resp_type(struct fc_frame *fp)
{
const char *msg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
if (IS_ERR(fp)) {
switch (-PTR_ERR(fp)) {
case FC_NO_ERR:
msg = "response no error";
break;
case FC_EX_TIMEOUT:
msg = "response timeout";
break;
case FC_EX_CLOSED:
msg = "response closed";
break;
default:
msg = "response unknown error";
break;
}
} else {
fh = fc_frame_header_get(fp);
switch (fh->fh_type) {
case FC_TYPE_ELS:
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
msg = "accept";
break;
case ELS_LS_RJT:
msg = "reject";
break;
default:
msg = "response unknown ELS";
break;
}
break;
case FC_TYPE_CT:
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (ct) {
switch (ntohs(ct->ct_cmd)) {
case FC_FS_ACC:
msg = "CT accept";
break;
case FC_FS_RJT:
msg = "CT reject";
break;
default:
msg = "response unknown CT";
break;
}
} else {
msg = "short CT response";
}
break;
default:
msg = "response not ELS or CT";
break;
}
}
return msg;
}
| gpl-2.0 |
Carbonite12/Jajo_LG | fs/proc/proc_tty.c | 10747 | 4805 | /*
* proc_tty.c -- handles /proc/tty
*
* Copyright 1997, Theodore Ts'o
*/
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
/*
* The /proc/tty directory inodes...
*/
static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver;
/*
* This is the handler for /proc/tty/drivers
*/
static void show_tty_range(struct seq_file *m, struct tty_driver *p,
dev_t from, int num)
{
seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
seq_printf(m, "/dev/%-8s ", p->name);
if (p->num > 1) {
seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from),
MINOR(from) + num - 1);
} else {
seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
}
switch (p->type) {
case TTY_DRIVER_TYPE_SYSTEM:
seq_puts(m, "system");
if (p->subtype == SYSTEM_TYPE_TTY)
seq_puts(m, ":/dev/tty");
else if (p->subtype == SYSTEM_TYPE_SYSCONS)
seq_puts(m, ":console");
else if (p->subtype == SYSTEM_TYPE_CONSOLE)
seq_puts(m, ":vtmaster");
break;
case TTY_DRIVER_TYPE_CONSOLE:
seq_puts(m, "console");
break;
case TTY_DRIVER_TYPE_SERIAL:
seq_puts(m, "serial");
break;
case TTY_DRIVER_TYPE_PTY:
if (p->subtype == PTY_TYPE_MASTER)
seq_puts(m, "pty:master");
else if (p->subtype == PTY_TYPE_SLAVE)
seq_puts(m, "pty:slave");
else
seq_puts(m, "pty");
break;
default:
seq_printf(m, "type:%d.%d", p->type, p->subtype);
}
seq_putc(m, '\n');
}
static int show_tty_driver(struct seq_file *m, void *v)
{
struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers);
dev_t from = MKDEV(p->major, p->minor_start);
dev_t to = from + p->num;
if (&p->tty_drivers == tty_drivers.next) {
/* pseudo-drivers first */
seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
seq_puts(m, "system:/dev/tty\n");
seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
seq_puts(m, "system:console\n");
#ifdef CONFIG_UNIX98_PTYS
seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
seq_puts(m, "system\n");
#endif
#ifdef CONFIG_VT
seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
seq_puts(m, "system:vtmaster\n");
#endif
}
while (MAJOR(from) < MAJOR(to)) {
dev_t next = MKDEV(MAJOR(from)+1, 0);
show_tty_range(m, p, from, next - from);
from = next;
}
if (from != to)
show_tty_range(m, p, from, to - from);
return 0;
}
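/*
 * For reference, the text assembled above reads roughly as follows (drivers
 * and minor ranges vary by system; the lines are illustrative only):
 *
 * /dev/tty             /dev/tty        5       0 system:/dev/tty
 * /dev/console         /dev/console    5       1 system:console
 * serial               /dev/ttyS       4   64-67 serial
 *
 * i.e. driver name, device node prefix, major number, minor (or minor
 * range), and the type string chosen in show_tty_range().
 */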
/* iterator */
static void *t_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&tty_mutex);
return seq_list_start(&tty_drivers, *pos);
}
static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
return seq_list_next(v, &tty_drivers, pos);
}
static void t_stop(struct seq_file *m, void *v)
{
mutex_unlock(&tty_mutex);
}
static const struct seq_operations tty_drivers_op = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = show_tty_driver
};
static int tty_drivers_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tty_drivers_op);
}
static const struct file_operations proc_tty_drivers_operations = {
.open = tty_drivers_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* This function is called by tty_register_driver() to handle
* registering the driver's /proc handler into /proc/tty/driver/<foo>
*/
void proc_tty_register_driver(struct tty_driver *driver)
{
struct proc_dir_entry *ent;
if (!driver->driver_name || driver->proc_entry ||
!driver->ops->proc_fops)
return;
ent = proc_create_data(driver->driver_name, 0, proc_tty_driver,
driver->ops->proc_fops, driver);
driver->proc_entry = ent;
}
/*
* This function is called by tty_unregister_driver()
*/
void proc_tty_unregister_driver(struct tty_driver *driver)
{
struct proc_dir_entry *ent;
ent = driver->proc_entry;
if (!ent)
return;
remove_proc_entry(driver->driver_name, proc_tty_driver);
driver->proc_entry = NULL;
}
/*
* Called by proc_root_init() to initialize the /proc/tty subtree
*/
void __init proc_tty_init(void)
{
if (!proc_mkdir("tty", NULL))
return;
proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
/*
* /proc/tty/driver/serial reveals the exact character counts for
* serial links which is just too easy to abuse for inferring
* password lengths and inter-keystroke timings during password
* entry.
*/
proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
}
| gpl-2.0 |
manabian/linux-lpc | net/rxrpc/utils.c | 252 | 1424 | /* Utility routines
*
* Copyright (C) 2015 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include "ar-internal.h"
/*
* Fill out a peer address from a socket buffer containing a packet.
*/
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
{
memset(srx, 0, sizeof(*srx));
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.sin.sin_family = AF_INET;
srx->transport.sin.sin_port = udp_hdr(skb)->source;
srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
return 0;
#ifdef CONFIG_AF_RXRPC_IPV6
case ETH_P_IPV6:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin6);
srx->transport.sin6.sin6_family = AF_INET6;
srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
return 0;
#endif
default:
pr_warn_ratelimited("AF_RXRPC: Unknown eth protocol %u\n",
ntohs(skb->protocol));
return -EAFNOSUPPORT;
}
}
| gpl-2.0 |
Kali-/tf101-kernel | drivers/crypto/ixp4xx_crypto.c | 252 | 37603 | /*
* Intel IXP4xx NPE-C crypto driver
*
* Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define MAX_KEYLEN 32
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80
#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9
#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000
#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8
#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400
#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
* NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64
#define SEND_QID 29
#define RECV_QID 30
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
struct buffer_desc {
u32 phys_next;
#ifdef __ARMEB__
u16 buf_len;
u16 pkt_len;
#else
u16 pkt_len;
u16 buf_len;
#endif
u32 phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
enum dma_data_direction dir;
};
struct crypt_ctl {
#ifdef __ARMEB__
u8 mode; /* NPE_OP_* operation mode */
u8 init_len;
u16 reserved;
#else
u16 reserved;
u8 init_len;
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
u32 icv_rev_aes; /* icv or rev aes */
u32 src_buf;
u32 dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
u16 crypt_offs; /* Cryption start offset */
u16 crypt_len; /* Cryption data length */
#else
u16 auth_len; /* Authentication data length */
u16 auth_offs; /* Authentication start offset */
u16 crypt_len; /* Cryption data length */
u16 crypt_offs; /* Cryption start offset */
#endif
u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
u32 crypto_ctx; /* NPE Crypto Param structure address */
/* Used by Host: 4*4 bytes*/
unsigned ctl_flags;
union {
struct ablkcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
struct buffer_desc *regist_buf;
u8 *regist_ptr;
};
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
};
struct aead_ctx {
struct buffer_desc *buffer;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
int encrypt;
};
struct ix_hash_algo {
u32 cfgword;
unsigned char *icv;
};
struct ix_sa_dir {
unsigned char *npe_ctx;
dma_addr_t npe_ctx_phys;
int npe_ctx_idx;
u8 npe_mode;
};
struct ixp_ctx {
struct ix_sa_dir encrypt;
struct ix_sa_dir decrypt;
int authkey_len;
u8 authkey[MAX_KEYLEN];
int enckey_len;
u8 enckey[MAX_KEYLEN];
u8 salt[MAX_IVLEN];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
unsigned salted;
atomic_t configuring;
struct completion completion;
};
struct ixp_alg {
struct crypto_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
int registered;
};
static const struct ix_hash_algo hash_alg_md5 = {
.cfgword = 0xAA010004,
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
.cfgword = 0x00000005,
.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
"\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;
static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;
static int support_aes = 1;
static void dev_release(struct device *dev)
{
return;
}
#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
.name = DRIVER_NAME,
.id = 0,
.num_resources = 0,
.dev = {
.coherent_dma_mask = DMA_BIT_MASK(32),
.release = dev_release,
}
};
static struct device *dev = &pseudo_dev.dev;
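/* The crypt_ctl descriptors live in one contiguous coherent allocation,
* so virtual and DMA addresses convert with simple pointer arithmetic. */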
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}
static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}
static int setup_crypt_desc(void)
{
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
/* Allocate all NPE_QLEN_TOTAL descriptors: the emergency path and
* release_ixp_crypto() index/free up to that count, not just NPE_QLEN. */
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
&crypt_phys, GFP_KERNEL);
if (!crypt_virt)
return -ENOMEM;
memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
return 0;
}
static spinlock_t desc_lock;
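/* Hand out the next slot from the first NPE_QLEN descriptors; if the
* candidate slot is still in flight, fail instead of scanning further. */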
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
static int idx = 0;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
if (unlikely(!crypt_virt))
setup_crypt_desc();
if (unlikely(!crypt_virt)) {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
return crypt_virt + i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
}
static spinlock_t emerg_lock;
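/* Fall back to the descriptor slots reserved above NPE_QLEN; these are
* used by the key/ICV setup paths when the normal ring is exhausted. */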
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
static int idx = NPE_QLEN;
struct crypt_ctl *desc;
unsigned long flags;
desc = get_crypt_desc();
if (desc)
return desc;
if (unlikely(!crypt_virt))
return NULL;
spin_lock_irqsave(&emerg_lock, flags);
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN_TOTAL)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
return crypt_virt + i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
}
}
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
{
while (buf) {
struct buffer_desc *buf1;
u32 phys1;
buf1 = buf->next;
phys1 = buf->phys_next;
/* unmap the mapped data buffer (phys_addr), not the next-descriptor link */
dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
}
}
static struct tasklet_struct crypto_done_tasklet;
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int authsize = crypto_aead_authsize(tfm);
int decryptlen = req->cryptlen - authsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
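/* Completion handler for one queue entry: bit 0 of the physical address
* carries the NPE's failure flag, the remaining bits address the crypt_ctl. */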
static void one_packet(dma_addr_t phys)
{
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
failed = phys & 0x1 ? -EBADMSG : 0;
phys &= ~0x3;
crypt = crypt_phys2virt(phys);
switch (crypt->ctl_flags & CTL_FLAG_MASK) {
case CTL_FLAG_PERFORM_AEAD: {
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
struct ablkcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
if (req_ctx->dst) {
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
}
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_GEN_ICV:
ctx = crypto_tfm_ctx(crypt->data.tfm);
dma_pool_free(ctx_pool, crypt->regist_ptr,
crypt->regist_buf->phys_addr);
dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
default:
BUG();
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
}
static void irqhandler(void *_unused)
{
tasklet_schedule(&crypto_done_tasklet);
}
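/* Drain up to four completed entries per tasklet run, then reschedule
* so other softirq work gets a chance to run. */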
static void crypto_done_action(unsigned long arg)
{
int i;
for (i = 0; i < 4; i++) {
dma_addr_t phys = qmgr_get_entry(RECV_QID);
if (!phys)
return;
one_packet(phys);
}
tasklet_schedule(&crypto_done_tasklet);
}
static int init_ixp_crypto(void)
{
int ret = -ENODEV;
u32 msg[2] = { 0, 0 };
if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
return ret;
}
npe_c = npe_request(NPE_ID);
if (!npe_c)
return ret;
if (!npe_running(npe_c)) {
ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
if (ret) {
return ret;
}
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
} else {
if (npe_send_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
}
switch ((msg[1]>>16) & 0xff) {
case 3:
printk(KERN_WARNING "Firmware of %s lacks AES support\n",
npe_name(npe_c));
support_aes = 0;
break;
case 4:
case 5:
support_aes = 1;
break;
default:
printk(KERN_ERR "Firmware of %s lacks crypto support\n",
npe_name(npe_c));
return -ENODEV;
}
/* buffer_pool will also sometimes be used to store the hmac,
* so ensure it is large enough
*/
BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
buffer_pool = dma_pool_create("buffer", dev,
sizeof(struct buffer_desc), 32, 0);
ret = -ENOMEM;
if (!buffer_pool) {
goto err;
}
ctx_pool = dma_pool_create("context", dev,
NPE_CTX_LEN, 16, 0);
if (!ctx_pool) {
goto err;
}
ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
"ixp_crypto:out", NULL);
if (ret)
goto err;
ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
"ixp_crypto:in", NULL);
if (ret) {
qmgr_release_queue(SEND_QID);
goto err;
}
qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
qmgr_enable_irq(RECV_QID);
return 0;
npe_error:
printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
ret = -EIO;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
if (buffer_pool)
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
return ret;
}
static void release_ixp_crypto(void)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
qmgr_release_queue(SEND_QID);
qmgr_release_queue(RECV_QID);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
if (crypt_virt) {
dma_free_coherent(dev,
NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dir->npe_ctx_idx = 0;
dir->npe_mode = 0;
}
static int init_sa_dir(struct ix_sa_dir *dir)
{
dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
if (!dir->npe_ctx) {
return -ENOMEM;
}
reset_sa_dir(dir);
return 0;
}
static void free_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
static int init_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
atomic_set(&ctx->configuring, 0);
ret = init_sa_dir(&ctx->encrypt);
if (ret)
return ret;
ret = init_sa_dir(&ctx->decrypt);
if (ret) {
free_sa_dir(&ctx->encrypt);
}
return ret;
}
static int init_tfm_ablk(struct crypto_tfm *tfm)
{
tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
return init_tfm(tfm);
}
static int init_tfm_aead(struct crypto_tfm *tfm)
{
tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
return init_tfm(tfm);
}
static void exit_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
free_sa_dir(&ctx->encrypt);
free_sa_dir(&ctx->decrypt);
}
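/* Precompute one half of the HMAC state: XOR the key with ipad/opad,
* have the NPE hash that padded block, and store the intermediate digest
* at 'target' inside the per-direction crypto context. */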
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypt_ctl *crypt;
struct buffer_desc *buf;
int i;
u8 *pad;
u32 pad_phys, buf_phys;
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
if (!pad)
return -ENOMEM;
buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
if (!buf) {
dma_pool_free(ctx_pool, pad, pad_phys);
return -ENOMEM;
}
crypt = get_crypt_desc_emerg();
if (!crypt) {
dma_pool_free(ctx_pool, pad, pad_phys);
dma_pool_free(buffer_pool, buf, buf_phys);
return -EAGAIN;
}
memcpy(pad, key, key_len);
memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
pad[i] ^= xpad;
}
crypt->data.tfm = tfm;
crypt->regist_ptr = pad;
crypt->regist_buf = buf;
crypt->auth_offs = 0;
crypt->auth_len = HMAC_PAD_BLOCKLEN;
crypt->crypto_ctx = ctx_addr;
crypt->src_buf = buf_phys;
crypt->icv_rev_aes = target;
crypt->mode = NPE_OP_HASH_GEN_ICV;
crypt->init_len = init_len;
crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
buf->next = 0;
buf->buf_len = HMAC_PAD_BLOCKLEN;
buf->pkt_len = 0;
buf->phys_addr = pad_phys;
atomic_inc(&ctx->configuring);
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return 0;
}
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
const u8 *key, int key_len, unsigned digest_len)
{
u32 itarget, otarget, npe_ctx_addr;
unsigned char *cinfo;
int init_len, ret = 0;
u32 cfgword;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
const struct ix_hash_algo *algo;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx + dir->npe_ctx_idx;
algo = ix_hash(tfm);
/* write cfg word to cryptinfo */
cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
*(u32*)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
memcpy(cinfo, algo->icv, digest_len);
cinfo += digest_len;
itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+ sizeof(algo->cfgword);
otarget = itarget + digest_len;
init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
dir->npe_ctx_idx += init_len;
dir->npe_mode |= NPE_OP_HASH_ENABLE;
if (!encrypt)
dir->npe_mode |= NPE_OP_HASH_VERIFY;
ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
init_len, npe_ctx_addr, key, key_len);
if (ret)
return ret;
return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
init_len, npe_ctx_addr, key, key_len);
}
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
struct crypt_ctl *crypt;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ix_sa_dir *dir = &ctx->decrypt;
crypt = get_crypt_desc_emerg();
if (!crypt) {
return -EAGAIN;
}
*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
crypt->crypt_len = AES_BLOCK128;
crypt->src_buf = 0;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
crypt->mode = NPE_OP_ENC_GEN_KEY;
crypt->init_len = dir->npe_ctx_idx;
crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
atomic_inc(&ctx->configuring);
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return 0;
}
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
const u8 *key, int key_len)
{
u8 *cinfo;
u32 cipher_cfg;
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
if (encrypt) {
cipher_cfg = cipher_cfg_enc(tfm);
dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
} else {
cipher_cfg = cipher_cfg_dec(tfm);
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
const u32 *K = (const u32 *)key;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))))
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
return -EINVAL;
}
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
}
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
memcpy(cinfo, key, key_len);
/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
key_len = DES3_EDE_KEY_SIZE;
}
dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
if ((cipher_cfg & MOD_AES) && !encrypt) {
return gen_rev_aes_key(tfm);
}
return 0;
}
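/* Walk the scatterlist, DMA-mapping each segment and linking a buffer_desc
* for it onto the chain started at 'buf'; the tail of the chain is returned. */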
static struct buffer_desc *chainup_buffers(struct device *dev,
struct scatterlist *sg, unsigned nbytes,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
u32 next_buf_phys;
void *ptr;
nbytes -= len;
ptr = page_address(sg_page(sg)) + sg->offset;
next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
if (!next_buf) {
buf = NULL;
break;
}
sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
buf->next = next_buf;
buf->phys_next = next_buf_phys;
buf = next_buf;
buf->phys_addr = sg_dma_address(sg);
buf->buf_len = len;
buf->dir = dir;
}
buf->next = NULL;
buf->phys_next = 0;
return buf;
}
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ret = setup_cipher(&tfm->base, 0, key, key_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
if (ret)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
ret = -EINVAL;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
}
}
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
return ret;
}
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
/* the nonce is stored in the last CTR_RFC3686_NONCE_SIZE bytes of the key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ablk_setkey(tfm, key, key_len);
}
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int nbytes = req->nbytes;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
struct buffer_desc src_hook;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
crypt = get_crypt_desc();
if (!crypt)
return -ENOMEM;
crypt->data.ablk_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
BUG_ON(ivsize && !req->info);
memcpy(crypt->iv, req->info, ivsize);
if (req->src != req->dst) {
struct buffer_desc dst_hook;
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
req_ctx->dst = dst_hook.next;
crypt->dst_buf = dst_hook.phys_next;
} else {
req_ctx->dst = NULL;
}
req_ctx->src = NULL;
if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
flags, src_direction))
goto free_buf_src;
req_ctx->src = src_hook.next;
crypt->src_buf = src_hook.phys_next;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
free_buf_src:
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
if (req->src != req->dst) {
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
static int ablk_encrypt(struct ablkcipher_request *req)
{
return ablk_perform(req, 1);
}
static int ablk_decrypt(struct ablkcipher_request *req)
{
return ablk_perform(req, 0);
}
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
u8 *info = req->info;
int ret;
/* set up counter block */
memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
req->info = iv;
ret = ablk_perform(req, 1);
req->info = info;
return ret;
}
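/* Return non-zero if the nbytes region starting at 'start' is not fully
* contained in a single scatterlist entry (i.e. the ICV is scattered). */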
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
unsigned int nbytes)
{
int offset = 0;
if (!nbytes)
return 0;
for (;;) {
if (start < offset + sg->length)
break;
offset += sg->length;
sg = scatterwalk_sg_next(sg);
}
return (start + nbytes > offset + sg->length);
}
static int aead_perform(struct aead_request *req, int encrypt,
int cryptoffset, int eff_cryptlen, u8 *iv)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned ivsize = crypto_aead_ivsize(tfm);
unsigned authsize = crypto_aead_authsize(tfm);
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int cryptlen;
struct buffer_desc *buf, src_hook;
struct aead_ctx *req_ctx = aead_request_ctx(req);
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
if (encrypt) {
dir = &ctx->encrypt;
cryptlen = req->cryptlen;
} else {
dir = &ctx->decrypt;
/* req->cryptlen includes the authsize when decrypting */
cryptlen = req->cryptlen - authsize;
eff_cryptlen -= authsize;
}
crypt = get_crypt_desc();
if (!crypt)
return -ENOMEM;
crypt->data.aead_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = cryptoffset;
crypt->crypt_len = eff_cryptlen;
crypt->auth_offs = 0;
crypt->auth_len = req->assoclen + ivsize + cryptlen;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
BUG(); /* -ENOTSUPP because of my laziness */
}
/* ASSOC data */
buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
flags, DMA_TO_DEVICE);
req_ctx->buffer = src_hook.next;
crypt->src_buf = src_hook.phys_next;
if (!buf)
goto out;
/* IV */
sg_init_table(&req_ctx->ivlist, 1);
sg_set_buf(&req_ctx->ivlist, iv, ivsize);
buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
DMA_BIDIRECTIONAL);
if (!buf)
goto free_chain;
if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
goto free_chain;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
}
req_ctx->encrypt = encrypt;
} else {
req_ctx->hmac_virt = NULL;
}
/* Crypt */
buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
DMA_BIDIRECTIONAL);
if (!buf)
goto free_hmac_virt;
if (!req_ctx->hmac_virt) {
crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
}
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
free_hmac_virt:
if (req_ctx->hmac_virt) {
dma_pool_free(buffer_pool, req_ctx->hmac_virt,
crypt->icv_rev_aes);
}
free_chain:
free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
int ret;
if (!ctx->enckey_len && !ctx->authkey_len)
return 0;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
if (ret)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
ret = -EINVAL;
goto out;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
}
}
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
return ret;
}
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
return -EINVAL;
return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
ctx->enckey_len = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < ctx->enckey_len)
goto badkey;
ctx->authkey_len = keylen - ctx->enckey_len;
memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
memcpy(ctx->authkey, key, ctx->authkey_len);
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static int aead_encrypt(struct aead_request *req)
{
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
return aead_perform(req, 1, req->assoclen + ivsize,
req->cryptlen, req->iv);
}
static int aead_decrypt(struct aead_request *req)
{
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
return aead_perform(req, 0, req->assoclen + ivsize,
req->cryptlen, req->iv);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned len, ivsize = crypto_aead_ivsize(tfm);
__be64 seq;
/* copied from eseqiv.c */
if (!ctx->salted) {
get_random_bytes(ctx->salt, ivsize);
ctx->salted = 1;
}
memcpy(req->areq.iv, ctx->salt, ivsize);
len = ivsize;
if (ivsize > sizeof(u64)) {
memset(req->giv, 0, ivsize - sizeof(u64));
len = sizeof(u64);
}
seq = cpu_to_be64(req->seq);
memcpy(req->giv + ivsize - len, &seq, len);
return aead_perform(&req->areq, 1, req->areq.assoclen,
req->areq.cryptlen + ivsize, req->giv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
.cra_name = "cbc(des)",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "ecb(des)",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.cra_name = "cbc(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.cra_name = "ecb(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
.cra_name = "ctr(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
.cra_name = "rfc3686(ctr(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
.setkey = ablk_rfc3686_setkey,
.encrypt = ablk_rfc3686_crypt,
.decrypt = ablk_rfc3686_crypt }
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err;
if (platform_device_register(&pseudo_dev))
return -ENODEV;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
err = init_ixp_crypto();
if (err) {
platform_device_unregister(&pseudo_dev);
return err;
}
for (i = 0; i < num; i++) {
struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s"IXP_POSTFIX, cra->cra_name) >=
CRYPTO_MAX_ALG_NAME)
{
continue;
}
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
continue;
}
if (!ixp4xx_algos[i].hash) {
/* block ciphers */
cra->cra_type = &crypto_ablkcipher_type;
cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC;
if (!cra->cra_ablkcipher.setkey)
cra->cra_ablkcipher.setkey = ablk_setkey;
if (!cra->cra_ablkcipher.encrypt)
cra->cra_ablkcipher.encrypt = ablk_encrypt;
if (!cra->cra_ablkcipher.decrypt)
cra->cra_ablkcipher.decrypt = ablk_decrypt;
cra->cra_init = init_tfm_ablk;
} else {
/* authenc */
cra->cra_type = &crypto_aead_type;
cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
CRYPTO_ALG_ASYNC;
cra->cra_aead.setkey = aead_setkey;
cra->cra_aead.setauthsize = aead_setauthsize;
cra->cra_aead.encrypt = aead_encrypt;
cra->cra_aead.decrypt = aead_decrypt;
cra->cra_aead.givencrypt = aead_givencrypt;
cra->cra_init = init_tfm_aead;
}
cra->cra_ctxsize = sizeof(struct ixp_ctx);
cra->cra_module = THIS_MODULE;
cra->cra_alignmask = 3;
cra->cra_priority = 300;
cra->cra_exit = exit_tfm;
if (crypto_register_alg(cra))
printk(KERN_ERR "Failed to register '%s'\n",
cra->cra_name);
else
ixp4xx_algos[i].registered = 1;
}
return 0;
}
static void __exit ixp_module_exit(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
for (i = 0; i < num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto();
platform_device_unregister(&pseudo_dev);
}
module_init(ixp_module_init);
module_exit(ixp_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");
| gpl-2.0 |
HCDRJacob/htc-kernel-wildfire-2.6.32 | drivers/char/ipmi/ipmi_poweroff.c | 508 | 19949 | /*
* ipmi_poweroff.c
*
* MontaVista IPMI Poweroff extension to sys_reboot
*
* Author: MontaVista Software, Inc.
* Steven Dake <sdake@mvista.com>
* Corey Minyard <cminyard@mvista.com>
* source@mvista.com
*
* Copyright 2002,2004 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/completion.h>
#include <linux/pm.h>
#include <linux/kdev_t.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#define PFX "IPMI poweroff: "
static void ipmi_po_smi_gone(int if_num);
static void ipmi_po_new_smi(int if_num, struct device *device);
/* Definitions for controlling power off (if the system supports it). These
* conveniently match the IPMI chassis control values. */
#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
/* the IPMI data command */
static int poweroff_powercycle;
/* Which interface to use, -1 means the first we see. */
static int ifnum_to_use = -1;
/* Our local state. */
static int ready;
static ipmi_user_t ipmi_user;
static int ipmi_ifnum;
static void (*specific_poweroff_func)(ipmi_user_t user);
/* Holds the old poweroff function so we can restore it on removal. */
static void (*old_poweroff_func)(void);
static int set_param_ifnum(const char *val, struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
return rv;
if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
return 0;
ipmi_po_smi_gone(ipmi_ifnum);
ipmi_po_new_smi(ifnum_to_use, NULL);
return 0;
}
module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
&ifnum_to_use, 0644);
MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the powerdown. "
"Setting to -1 defaults to the first registered "
"interface");
/* parameter definition to allow user to flag power cycle */
module_param(poweroff_powercycle, int, 0644);
MODULE_PARM_DESC(poweroff_powercycle,
" Set to non-zero to enable power cycle instead of power"
" down. Power cycle is contingent on hardware support,"
" otherwise it defaults back to power down.");
/* Stuff from the get device id command. */
static unsigned int mfg_id;
static unsigned int prod_id;
static unsigned char capabilities;
static unsigned char ipmi_version;
/*
* We use our own messages for this operation, we don't let the system
* allocate them, since we may be in a panic situation. The whole
* thing is single-threaded, anyway, so multiple messages are not
* required.
*/
static atomic_t dummy_count = ATOMIC_INIT(0);
static void dummy_smi_free(struct ipmi_smi_msg *msg)
{
atomic_dec(&dummy_count);
}
static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
atomic_dec(&dummy_count);
}
static struct ipmi_smi_msg halt_smi_msg = {
.done = dummy_smi_free
};
static struct ipmi_recv_msg halt_recv_msg = {
.done = dummy_recv_free
};
/*
* Code to send a message and wait for the response.
*/
static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
{
struct completion *comp = recv_msg->user_msg_data;
if (comp)
complete(comp);
}
static struct ipmi_user_hndl ipmi_poweroff_handler = {
.ipmi_recv_hndl = receive_handler
};
static int ipmi_request_wait_for_response(ipmi_user_t user,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *send_msg)
{
int rv;
struct completion comp;
init_completion(&comp);
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
&halt_smi_msg, &halt_recv_msg, 0);
if (rv)
return rv;
wait_for_completion(&comp);
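/* data[0] of the reply holds the IPMI completion code (0 on success). */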
return halt_recv_msg.msg.data[0];
}
/* Wait for message to complete, spinning. */
static int ipmi_request_in_rc_mode(ipmi_user_t user,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *send_msg)
{
int rv;
atomic_set(&dummy_count, 2);
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
&halt_smi_msg, &halt_recv_msg, 0);
if (rv) {
atomic_set(&dummy_count, 0);
return rv;
}
/*
* Spin until our message is done.
*/
while (atomic_read(&dummy_count) > 0) {
ipmi_poll_interface(user);
cpu_relax();
}
return halt_recv_msg.msg.data[0];
}
/*
* ATCA Support
*/
#define IPMI_NETFN_ATCA 0x2c
#define IPMI_ATCA_SET_POWER_CMD 0x11
#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
#define IPMI_PICMG_ID 0
#define IPMI_NETFN_OEM 0x2e
#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
static void pps_poweroff_atca(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "PPS powerdown hook used");
send_msg.netfn = IPMI_NETFN_OEM;
send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
send_msg.data = IPMI_ATCA_PPS_IANA;
send_msg.data_len = 3;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
" IPMI error 0x%x\n", rv);
}
return;
}
static int ipmi_atca_detect(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
/*
* Use get address info to check and see if we are ATCA
*/
send_msg.netfn = IPMI_NETFN_ATCA;
send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
data[0] = IPMI_PICMG_ID;
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_wait_for_response(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
mfg_id, prod_id);
if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
&& (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
printk(KERN_INFO PFX
"Installing Pigeon Point Systems Poweroff Hook\n");
atca_oem_poweroff_hook = pps_poweroff_atca;
}
return !rv;
}
static void ipmi_poweroff_atca(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[4];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via ATCA power command\n");
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_ATCA;
send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
data[0] = IPMI_PICMG_ID;
data[1] = 0; /* FRU id */
data[2] = 0; /* Power Level */
data[3] = 0; /* Don't change saved presets */
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
/*
* At this point, the system may be shutting down, and most
* serial drivers (if used) will have interrupts turned off;
* it may be better to ignore the IPMI_UNKNOWN_ERR_COMPLETION_CODE
* return code.
*/
if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
" IPMI error 0x%x\n", rv);
goto out;
}
if (atca_oem_poweroff_hook)
atca_oem_poweroff_hook(user);
out:
return;
}
/*
* CPI1 Support
*/
#define IPMI_NETFN_OEM_1 0xf8
#define OEM_GRP_CMD_SET_RESET_STATE 0x84
#define OEM_GRP_CMD_SET_POWER_STATE 0x82
#define IPMI_NETFN_OEM_8 0xf8
#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
#define IPMI_NETFN_SENSOR_EVT 0x10
#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
#define IPMI_CPI1_PRODUCT_ID 0x000157
#define IPMI_CPI1_MANUFACTURER_ID 0x0108
static int ipmi_cpi1_detect(ipmi_user_t user)
{
return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
&& (prod_id == IPMI_CPI1_PRODUCT_ID));
}
static void ipmi_poweroff_cpi1(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct ipmi_ipmb_addr ipmb_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
int slot;
unsigned char hotswap_ipmb;
unsigned char aer_addr;
unsigned char aer_lun;
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
/*
* Get IPMI ipmb address
*/
send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
slot = halt_recv_msg.msg.data[1];
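/* Derive the blade's hot-swap IPMB address from its geographic slot
* number; the encoding changes above slot 9. */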
hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
/*
* Get active event receiver
*/
send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
aer_addr = halt_recv_msg.msg.data[1];
aer_lun = halt_recv_msg.msg.data[2];
/*
* Setup IPMB address target instead of local target
*/
ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb_addr.channel = 0;
ipmb_addr.slave_addr = aer_addr;
ipmb_addr.lun = aer_lun;
/*
* Send request hotswap control to remove blade from dpv
*/
send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
send_msg.data = &hotswap_ipmb;
send_msg.data_len = 1;
ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &ipmb_addr,
&send_msg);
/*
* Set reset asserted
*/
send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
send_msg.data = data;
data[0] = 1; /* Reset asserted state */
send_msg.data_len = 1;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
send_msg.data = data;
data[0] = 1; /* Power down state */
send_msg.data_len = 1;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
out:
return;
}
/*
* ipmi_dell_chassis_detect()
* Dell systems with IPMI < 1.5 don't set the chassis capability bit
* but they can handle a chassis poweroff or powercycle command.
*/
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
static int ipmi_dell_chassis_detect(ipmi_user_t user)
{
const char ipmi_version_major = ipmi_version & 0xF;
const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
const char mfr[3] = DELL_IANA_MFR_ID;
if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
ipmi_version_major <= 1 &&
ipmi_version_minor < 5)
return 1;
return 0;
}
/*
* Standard chassis support
*/
#define IPMI_NETFN_CHASSIS_REQUEST 0
#define IPMI_CHASSIS_CONTROL_CMD 0x02
static int ipmi_chassis_detect(ipmi_user_t user)
{
/* Chassis support, use it. */
return (capabilities & 0x80);
}
static void ipmi_poweroff_chassis(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
powercyclefailed:
printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
(poweroff_powercycle ? "cycle" : "down"));
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
if (poweroff_powercycle)
data[0] = IPMI_CHASSIS_POWER_CYCLE;
else
data[0] = IPMI_CHASSIS_POWER_DOWN;
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv) {
if (poweroff_powercycle) {
/* power cycle failed, default to power down */
printk(KERN_ERR PFX "Unable to send chassis power " \
"cycle message, IPMI error 0x%x\n", rv);
poweroff_powercycle = 0;
goto powercyclefailed;
}
printk(KERN_ERR PFX "Unable to send chassis power " \
"down message, IPMI error 0x%x\n", rv);
}
}
/* Table of possible power off functions. */
struct poweroff_function {
char *platform_type;
int (*detect)(ipmi_user_t user);
void (*poweroff_func)(ipmi_user_t user);
};
static struct poweroff_function poweroff_functions[] = {
{ .platform_type = "ATCA",
.detect = ipmi_atca_detect,
.poweroff_func = ipmi_poweroff_atca },
{ .platform_type = "CPI1",
.detect = ipmi_cpi1_detect,
.poweroff_func = ipmi_poweroff_cpi1 },
{ .platform_type = "chassis",
.detect = ipmi_dell_chassis_detect,
.poweroff_func = ipmi_poweroff_chassis },
/* Chassis should generally be last, other things should override
it. */
{ .platform_type = "chassis",
.detect = ipmi_chassis_detect,
.poweroff_func = ipmi_poweroff_chassis },
};
#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
/ sizeof(struct poweroff_function))
/* Called on a powerdown request. */
static void ipmi_poweroff_function(void)
{
if (!ready)
return;
/* Use run-to-completion mode, since interrupts may be off. */
specific_poweroff_func(ipmi_user);
}
/* Wait for an IPMI interface to be installed; the first one installed
will be grabbed by this code and used to perform the powerdown. */
static void ipmi_po_new_smi(int if_num, struct device *device)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
int i;
if (ready)
return;
if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
return;
rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
&ipmi_user);
if (rv) {
printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
rv);
return;
}
ipmi_ifnum = if_num;
/*
* Do a get device id and store some results, since they are
* used by several functions.
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
send_msg.netfn = IPMI_NETFN_APP_REQUEST;
send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_wait_for_response(ipmi_user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv) {
printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
" IPMI error 0x%x\n", rv);
goto out_err;
}
if (halt_recv_msg.msg.data_len < 12) {
printk(KERN_ERR PFX "(chassis) IPMI get device id info too"
" short, was %d bytes, needed %d bytes\n",
halt_recv_msg.msg.data_len, 12);
goto out_err;
}
mfg_id = (halt_recv_msg.msg.data[7]
| (halt_recv_msg.msg.data[8] << 8)
| (halt_recv_msg.msg.data[9] << 16));
prod_id = (halt_recv_msg.msg.data[10]
| (halt_recv_msg.msg.data[11] << 8));
capabilities = halt_recv_msg.msg.data[6];
ipmi_version = halt_recv_msg.msg.data[5];
/* Scan for a poweroff method */
for (i = 0; i < NUM_PO_FUNCS; i++) {
if (poweroff_functions[i].detect(ipmi_user))
goto found;
}
out_err:
printk(KERN_ERR PFX "Unable to find a poweroff function that"
" will work, giving up\n");
ipmi_destroy_user(ipmi_user);
return;
found:
printk(KERN_INFO PFX "Found a %s style poweroff function\n",
poweroff_functions[i].platform_type);
specific_poweroff_func = poweroff_functions[i].poweroff_func;
old_poweroff_func = pm_power_off;
pm_power_off = ipmi_poweroff_function;
ready = 1;
}
static void ipmi_po_smi_gone(int if_num)
{
if (!ready)
return;
if (ipmi_ifnum != if_num)
return;
ready = 0;
ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
static struct ipmi_smi_watcher smi_watcher = {
.owner = THIS_MODULE,
.new_smi = ipmi_po_new_smi,
.smi_gone = ipmi_po_smi_gone
};
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
static ctl_table ipmi_table[] = {
{ .ctl_name = DEV_IPMI_POWEROFF_POWERCYCLE,
.procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
.mode = 0644,
.proc_handler = &proc_dointvec },
{ }
};
static ctl_table ipmi_dir_table[] = {
{ .ctl_name = DEV_IPMI,
.procname = "ipmi",
.mode = 0555,
.child = ipmi_table },
{ }
};
static ctl_table ipmi_root_table[] = {
{ .ctl_name = CTL_DEV,
.procname = "dev",
.mode = 0555,
.child = ipmi_dir_table },
{ }
};
static struct ctl_table_header *ipmi_table_header;
#endif /* CONFIG_PROC_FS */
/*
* Startup and shutdown functions.
*/
static int __init ipmi_poweroff_init(void)
{
int rv;
printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
" IPMI Powerdown via sys_reboot.\n");
if (poweroff_powercycle)
printk(KERN_INFO PFX "Power cycle is enabled.\n");
#ifdef CONFIG_PROC_FS
ipmi_table_header = register_sysctl_table(ipmi_root_table);
if (!ipmi_table_header) {
printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
rv = -ENOMEM;
goto out_err;
}
#endif
rv = ipmi_smi_watcher_register(&smi_watcher);
#ifdef CONFIG_PROC_FS
if (rv) {
unregister_sysctl_table(ipmi_table_header);
printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
goto out_err;
}
out_err:
#endif
return rv;
}
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
int rv;
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
rv = ipmi_destroy_user(ipmi_user);
if (rv)
printk(KERN_ERR PFX "could not cleanup the IPMI"
" user: 0x%x\n", rv);
pm_power_off = old_poweroff_func;
}
}
module_exit(ipmi_poweroff_cleanup);
#endif
module_init(ipmi_poweroff_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
| gpl-2.0 |
manuelnaranjo/goldenleaf | drivers/block/hd.c | 508 | 19338 | /*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This is the low-level hd interrupt support. It traverses the
* request-list, using interrupts to jump between functions. As
* all the functions are called within interrupts, we may not
* sleep. Special care is recommended.
*
* modified by Drew Eckhardt to check nr of hd's from the CMOS.
*
* Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
* in the early extended-partition checks and added DM partitions
*
* IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
* and general streamlining by Mark Lord.
*
* Removed 99% of above. Use Mark's ide driver for those options.
* This is now a lightweight ST-506 driver. (Paul Gortmaker)
*
* Modified 1995 Russell King for ARM processor.
*
* Bugfix: max_sectors must be <= 255 or the wheels tend to come
* off in a hurry once you queue things up - Paul G. 02/2001
*/
/* Uncomment the following if you want verbose error reports. */
/* #define VERBOSE_ERRORS */
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/blkpg.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#define HD_IRQ 14
#define REALLY_SLOW_IO
#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#ifdef __arm__
#undef HD_IRQ
#endif
#include <asm/irq.h>
#ifdef __arm__
#define HD_IRQ IRQ_HARDDISK
#endif
/* Hd controller register ports */
#define HD_DATA 0x1f0 /* _CTL when writing */
#define HD_ERROR 0x1f1 /* see err-bits */
#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
#define HD_SECTOR 0x1f3 /* starting sector */
#define HD_LCYL 0x1f4 /* starting cylinder */
#define HD_HCYL 0x1f5 /* high byte of starting cyl */
#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
#define HD_STATUS 0x1f7 /* see status-bits */
#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
#define HD_CMD 0x3f6 /* used for resets */
#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
/* Bits of HD_STATUS */
#define ERR_STAT 0x01
#define INDEX_STAT 0x02
#define ECC_STAT 0x04 /* Corrected error */
#define DRQ_STAT 0x08
#define SEEK_STAT 0x10
#define SERVICE_STAT SEEK_STAT
#define WRERR_STAT 0x20
#define READY_STAT 0x40
#define BUSY_STAT 0x80
/* Bits for HD_ERROR */
#define MARK_ERR 0x01 /* Bad address mark */
#define TRK0_ERR 0x02 /* couldn't find track 0 */
#define ABRT_ERR 0x04 /* Command aborted */
#define MCR_ERR 0x08 /* media change request */
#define ID_ERR 0x10 /* ID field not found */
#define MC_ERR 0x20 /* media changed */
#define ECC_ERR 0x40 /* Uncorrectable ECC error */
#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
static DEFINE_SPINLOCK(hd_lock);
static struct request_queue *hd_queue;
static struct request *hd_req;
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
#define MAX_ERRORS 16 /* Max read/write errors/sector */
#define RESET_FREQ 8 /* Reset controller every 8th retry */
#define RECAL_FREQ 4 /* Recalibrate every 4th retry */
#define MAX_HD 2
#define STAT_OK (READY_STAT|SEEK_STAT)
#define OK_STATUS(s) (((s)&(STAT_OK|(BUSY_STAT|WRERR_STAT|ERR_STAT)))==STAT_OK)
static void recal_intr(void);
static void bad_rw_intr(void);
static int reset;
static int hd_error;
/*
* This struct defines the HDs and their types.
*/
struct hd_i_struct {
unsigned int head, sect, cyl, wpcom, lzone, ctl;
int unit;
int recalibrate;
int special_op;
};
#ifdef HD_TYPE
static struct hd_i_struct hd_info[] = { HD_TYPE };
static int NR_HD = ARRAY_SIZE(hd_info);
#else
static struct hd_i_struct hd_info[MAX_HD];
static int NR_HD;
#endif
static struct gendisk *hd_gendisk[MAX_HD];
static struct timer_list device_timer;
#define TIMEOUT_VALUE (6*HZ)
#define SET_TIMER \
do { \
mod_timer(&device_timer, jiffies + TIMEOUT_VALUE); \
} while (0)
static void (*do_hd)(void) = NULL;
#define SET_HANDLER(x) \
if ((do_hd = (x)) != NULL) \
SET_TIMER; \
else \
del_timer(&device_timer);
#if (HD_DELAY > 0)
#include <asm/i8253.h>
unsigned long last_req;
unsigned long read_timer(void)
{
unsigned long t, flags;
int i;
spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
spin_unlock_irqrestore(&i8253_lock, flags);
return(t - i);
}
#endif
static void __init hd_setup(char *str, int *ints)
{
int hdind = 0;
if (ints[0] != 3)
return;
if (hd_info[0].head != 0)
hdind = 1;
hd_info[hdind].head = ints[2];
hd_info[hdind].sect = ints[3];
hd_info[hdind].cyl = ints[1];
hd_info[hdind].wpcom = 0;
hd_info[hdind].lzone = ints[1];
hd_info[hdind].ctl = (ints[2] > 8 ? 8 : 0);
NR_HD = hdind+1;
}
static bool hd_end_request(int err, unsigned int bytes)
{
if (__blk_end_request(hd_req, err, bytes))
return true;
hd_req = NULL;
return false;
}
static bool hd_end_request_cur(int err)
{
return hd_end_request(err, blk_rq_cur_bytes(hd_req));
}
static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
if (hd_req)
name = hd_req->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
if (stat & BUSY_STAT) printk("Busy ");
if (stat & READY_STAT) printk("DriveReady ");
if (stat & WRERR_STAT) printk("WriteFault ");
if (stat & SEEK_STAT) printk("SeekComplete ");
if (stat & DRQ_STAT) printk("DataRequest ");
if (stat & ECC_STAT) printk("CorrectedError ");
if (stat & INDEX_STAT) printk("Index ");
if (stat & ERR_STAT) printk("Error ");
printk("}\n");
if ((stat & ERR_STAT) == 0) {
hd_error = 0;
} else {
hd_error = inb(HD_ERROR);
printk("%s: %s: error=0x%02x { ", name, msg, hd_error & 0xff);
if (hd_error & BBD_ERR) printk("BadSector ");
if (hd_error & ECC_ERR) printk("UncorrectableError ");
if (hd_error & ID_ERR) printk("SectorIdNotFound ");
if (hd_error & ABRT_ERR) printk("DriveStatusError ");
if (hd_error & TRK0_ERR) printk("TrackZeroNotFound ");
if (hd_error & MARK_ERR) printk("AddrMarkNotFound ");
printk("}");
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
if (hd_req)
printk(", sector=%ld", blk_rq_pos(hd_req));
}
printk("\n");
}
#else
printk("%s: %s: status=0x%02x.\n", name, msg, stat & 0xff);
if ((stat & ERR_STAT) == 0) {
hd_error = 0;
} else {
hd_error = inb(HD_ERROR);
printk("%s: %s: error=0x%02x.\n", name, msg, hd_error & 0xff);
}
#endif
}
static void check_status(void)
{
int i = inb_p(HD_STATUS);
if (!OK_STATUS(i)) {
dump_status("check_status", i);
bad_rw_intr();
}
}
static int controller_busy(void)
{
int retries = 100000;
unsigned char status;
do {
status = inb_p(HD_STATUS);
} while ((status & BUSY_STAT) && --retries);
return status;
}
static int status_ok(void)
{
unsigned char status = inb_p(HD_STATUS);
if (status & BUSY_STAT)
return 1; /* Ancient, but does it make sense??? */
if (status & WRERR_STAT)
return 0;
if (!(status & READY_STAT))
return 0;
if (!(status & SEEK_STAT))
return 0;
return 1;
}
static int controller_ready(unsigned int drive, unsigned int head)
{
int retry = 100;
do {
if (controller_busy() & BUSY_STAT)
return 0;
outb_p(0xA0 | (drive<<4) | head, HD_CURRENT);
if (status_ok())
return 1;
} while (--retry);
return 0;
}
static void hd_out(struct hd_i_struct *disk,
unsigned int nsect,
unsigned int sect,
unsigned int head,
unsigned int cyl,
unsigned int cmd,
void (*intr_addr)(void))
{
unsigned short port;
#if (HD_DELAY > 0)
while (read_timer() - last_req < HD_DELAY)
/* nothing */;
#endif
if (reset)
return;
if (!controller_ready(disk->unit, head)) {
reset = 1;
return;
}
SET_HANDLER(intr_addr);
outb_p(disk->ctl, HD_CMD);
port = HD_DATA;
outb_p(disk->wpcom >> 2, ++port);
outb_p(nsect, ++port);
outb_p(sect, ++port);
outb_p(cyl, ++port);
outb_p(cyl >> 8, ++port);
outb_p(0xA0 | (disk->unit << 4) | head, ++port);
outb_p(cmd, ++port);
}
static void hd_request (void);
static int drive_busy(void)
{
unsigned int i;
unsigned char c;
for (i = 0; i < 500000 ; i++) {
c = inb_p(HD_STATUS);
if ((c & (BUSY_STAT | READY_STAT | SEEK_STAT)) == STAT_OK)
return 0;
}
dump_status("reset timed out", c);
return 1;
}
static void reset_controller(void)
{
int i;
outb_p(4, HD_CMD);
for (i = 0; i < 1000; i++) barrier();
outb_p(hd_info[0].ctl & 0x0f, HD_CMD);
for (i = 0; i < 1000; i++) barrier();
if (drive_busy())
printk("hd: controller still busy\n");
else if ((hd_error = inb(HD_ERROR)) != 1)
printk("hd: controller reset failed: %02x\n", hd_error);
}
static void reset_hd(void)
{
static int i;
repeat:
if (reset) {
reset = 0;
i = -1;
reset_controller();
} else {
check_status();
if (reset)
goto repeat;
}
if (++i < NR_HD) {
struct hd_i_struct *disk = &hd_info[i];
disk->special_op = disk->recalibrate = 1;
hd_out(disk, disk->sect, disk->sect, disk->head-1,
disk->cyl, ATA_CMD_INIT_DEV_PARAMS, &reset_hd);
if (reset)
goto repeat;
} else
hd_request();
}
/*
* Ok, don't know what to do with the unexpected interrupts: on some machines
* doing a reset and a retry seems to result in an eternal loop. Right now I
* ignore it, and just set the timeout.
*
* On laptops (and "green" PCs), an unexpected interrupt occurs whenever the
* drive enters "idle", "standby", or "sleep" mode, so if the status looks
* "good", we just ignore the interrupt completely.
*/
static void unexpected_hd_interrupt(void)
{
unsigned int stat = inb_p(HD_STATUS);
if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) {
dump_status("unexpected interrupt", stat);
SET_TIMER;
}
}
/*
* bad_rw_intr() now tries to be a bit smarter and does things
* according to the error returned by the controller.
* -Mika Liljeberg (liljeber@cs.Helsinki.FI)
*/
static void bad_rw_intr(void)
{
struct request *req = hd_req;
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
hd_end_request_cur(-EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
else if ((hd_error & TRK0_ERR) || req->errors % RECAL_FREQ == 0)
disk->special_op = disk->recalibrate = 1;
/* Otherwise just retry */
}
}
static inline int wait_DRQ(void)
{
int retries;
int stat;
for (retries = 0; retries < 100000; retries++) {
stat = inb_p(HD_STATUS);
if (stat & DRQ_STAT)
return 0;
}
dump_status("wait_DRQ", stat);
return -1;
}
static void read_intr(void)
{
struct request *req;
int i, retries = 100000;
do {
i = (unsigned) inb_p(HD_STATUS);
if (i & BUSY_STAT)
continue;
if (!OK_STATUS(i))
break;
if (i & DRQ_STAT)
goto ok_to_read;
} while (--retries > 0);
dump_status("read_intr", i);
bad_rw_intr();
hd_request();
return;
ok_to_read:
req = hd_req;
insw(HD_DATA, req->buffer, 256);
#ifdef DEBUG
printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
req->rq_disk->disk_name, blk_rq_pos(req) + 1,
blk_rq_sectors(req) - 1, req->buffer+512);
#endif
if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
return;
}
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
hd_request();
}
static void write_intr(void)
{
struct request *req = hd_req;
int i;
int retries = 100000;
do {
i = (unsigned) inb_p(HD_STATUS);
if (i & BUSY_STAT)
continue;
if (!OK_STATUS(i))
break;
if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
ok_to_write:
if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
return;
}
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
hd_request();
}
static void recal_intr(void)
{
check_status();
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
hd_request();
}
/*
* This is another of the error-routines I don't know what to do with. The
* best idea seems to just set reset, and start all over again.
*/
static void hd_times_out(unsigned long dummy)
{
char *name;
do_hd = NULL;
if (!hd_req)
return;
spin_lock_irq(hd_queue->queue_lock);
reset = 1;
name = hd_req->rq_disk->disk_name;
printk("%s: timeout\n", name);
if (++hd_req->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
hd_end_request_cur(-EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
}
static int do_special_op(struct hd_i_struct *disk, struct request *req)
{
if (disk->recalibrate) {
disk->recalibrate = 0;
hd_out(disk, disk->sect, 0, 0, 0, ATA_CMD_RESTORE, &recal_intr);
return reset;
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
hd_end_request_cur(-EIO);
}
disk->special_op = 0;
return 1;
}
/*
* The driver enables interrupts as much as possible. In order to do this,
* (a) the device-interrupt is disabled before entering hd_request(),
* and (b) the timeout-interrupt is disabled before the sti().
*
* Interrupts are still masked (by default) whenever we are exchanging
* data/cmds with a drive, because some drives seem to have very poor
* tolerance for latency during I/O. The IDE driver has support to unmask
* interrupts for non-broken hardware, so use that driver if required.
*/
static void hd_request(void)
{
unsigned int block, nsect, sec, track, head, cyl;
struct hd_i_struct *disk;
struct request *req;
if (do_hd)
return;
repeat:
del_timer(&device_timer);
if (!hd_req) {
hd_req = blk_fetch_request(hd_queue);
if (!hd_req) {
do_hd = NULL;
return;
}
}
req = hd_req;
if (reset) {
reset_hd();
return;
}
disk = req->rq_disk->private_data;
block = blk_rq_pos(req);
nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
hd_end_request_cur(-EIO);
goto repeat;
}
if (disk->special_op) {
if (do_special_op(disk, req))
goto repeat;
return;
}
sec = block % disk->sect + 1;
track = block / disk->sect;
head = track % disk->head;
cyl = track / disk->head;
#ifdef DEBUG
printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
req->rq_disk->disk_name,
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, req->buffer);
#endif
if (blk_fs_request(req)) {
switch (rq_data_dir(req)) {
case READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
&read_intr);
if (reset)
goto repeat;
break;
case WRITE:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
&write_intr);
if (reset)
goto repeat;
if (wait_DRQ()) {
bad_rw_intr();
goto repeat;
}
outsw(HD_DATA, req->buffer, 256);
break;
default:
printk("unknown hd-command\n");
hd_end_request_cur(-EIO);
break;
}
}
}
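/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It mirrors the linear-block to CHS arithmetic used in hd_request() above;
 * nothing in the driver calls it, and the geometry comes from hd_i_struct.
 */
static inline void hd_block_to_chs_example(unsigned int block,
const struct hd_i_struct *disk,
unsigned int *sec,
unsigned int *head,
unsigned int *cyl)
{
unsigned int track = block / disk->sect;

*sec = block % disk->sect + 1;	/* sectors are numbered from 1 */
*head = track % disk->head;
*cyl = track / disk->head;
}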
static void do_hd_request(struct request_queue *q)
{
hd_request();
}
static int hd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct hd_i_struct *disk = bdev->bd_disk->private_data;
geo->heads = disk->head;
geo->sectors = disk->sect;
geo->cylinders = disk->cyl;
return 0;
}
/*
* Releasing a block device means we sync() it, so that it can safely
* be forgotten about...
*/
static irqreturn_t hd_interrupt(int irq, void *dev_id)
{
void (*handler)(void) = do_hd;
spin_lock(hd_queue->queue_lock);
do_hd = NULL;
del_timer(&device_timer);
if (!handler)
handler = unexpected_hd_interrupt;
handler();
spin_unlock(hd_queue->queue_lock);
return IRQ_HANDLED;
}
static const struct block_device_operations hd_fops = {
.getgeo = hd_getgeo,
};
/*
* This is the hard disk IRQ description. The IRQF_DISABLED in sa_flags
* means we run the IRQ-handler with interrupts disabled: this is bad for
* interrupt latency, but anything else has led to problems on some
* machines.
*
* We enable interrupts in some of the routines after making sure it's
* safe.
*/
static int __init hd_init(void)
{
int drive;
if (register_blkdev(HD_MAJOR, "hd"))
return -1;
hd_queue = blk_init_queue(do_hd_request, &hd_lock);
if (!hd_queue) {
unregister_blkdev(HD_MAJOR, "hd");
return -ENOMEM;
}
blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
blk_queue_logical_block_size(hd_queue, 512);
if (!NR_HD) {
/*
* We don't know anything about the drive. This means
* that you *MUST* specify the drive parameters to the
* kernel yourself.
*
* On an i386 we used to read this info from the BIOS or
* CMOS. That doesn't work all that well, since it assumes
* this is a primary or secondary drive, and if we're using
* this legacy driver, it's probably an auxiliary controller
* added to recover
* legacy data off an ST-506 drive. Either way, it's
* definitely safest to have the user explicitly specify
* the information.
*/
printk("hd: no drives specified - use hd=cyl,head,sectors"
" on kernel command line\n");
goto out;
}
for (drive = 0 ; drive < NR_HD ; drive++) {
struct gendisk *disk = alloc_disk(64);
struct hd_i_struct *p = &hd_info[drive];
if (!disk)
goto Enomem;
disk->major = HD_MAJOR;
disk->first_minor = drive << 6;
disk->fops = &hd_fops;
sprintf(disk->disk_name, "hd%c", 'a'+drive);
disk->private_data = p;
set_capacity(disk, p->head * p->sect * p->cyl);
disk->queue = hd_queue;
p->unit = drive;
hd_gendisk[drive] = disk;
printk("%s: %luMB, CHS=%d/%d/%d\n",
disk->disk_name, (unsigned long)get_capacity(disk)/2048,
p->cyl, p->head, p->sect);
}
if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
printk("hd: unable to get IRQ%d for the hard disk driver\n",
HD_IRQ);
goto out1;
}
if (!request_region(HD_DATA, 8, "hd")) {
printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
goto out2;
}
if (!request_region(HD_CMD, 1, "hd(cmd)")) {
printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
goto out3;
}
/* Let them fly */
for (drive = 0; drive < NR_HD; drive++)
add_disk(hd_gendisk[drive]);
return 0;
out3:
release_region(HD_DATA, 8);
out2:
free_irq(HD_IRQ, NULL);
out1:
for (drive = 0; drive < NR_HD; drive++)
put_disk(hd_gendisk[drive]);
NR_HD = 0;
out:
del_timer(&device_timer);
unregister_blkdev(HD_MAJOR, "hd");
blk_cleanup_queue(hd_queue);
return -1;
Enomem:
while (drive--)
put_disk(hd_gendisk[drive]);
goto out;
}
static int __init parse_hd_setup(char *line)
{
int ints[6];
(void) get_options(line, ARRAY_SIZE(ints), ints);
hd_setup(NULL, ints);
return 1;
}
__setup("hd=", parse_hd_setup);
late_initcall(hd_init);
| gpl-2.0 |
longman88/kernel-qspinlock-v13 | arch/mips/mm/sc-r5k.c | 1020 | 2159 | /*
* Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (davem@davemloft.net).
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
/* Secondary cache size in bytes, if present. */
static unsigned long scache_size;
#define SC_LINE 32
#define SC_PAGE (128*SC_LINE)
static inline void blast_r5000_scache(void)
{
unsigned long start = INDEX_BASE;
unsigned long end = start + scache_size;
while(start < end) {
cache_op(R5K_Page_Invalidate_S, start);
start += SC_PAGE;
}
}
static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
unsigned long end, a;
/* Catch bad driver code */
BUG_ON(size == 0);
if (size >= scache_size) {
blast_r5000_scache();
return;
}
/* On the R5000 secondary cache we cannot
* invalidate less than a page at a time.
* The secondary cache is physically indexed, write-through.
*/
a = addr & ~(SC_PAGE - 1);
end = (addr + size - 1) & ~(SC_PAGE - 1);
while (a <= end) {
cache_op(R5K_Page_Invalidate_S, a);
a += SC_PAGE;
}
}
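/*
 * Editor's note: illustrative helper, not part of the original file. It shows
 * the page rounding used by r5k_dma_cache_inv_sc() above; with SC_PAGE = 4096
 * a hypothetical range (addr = 0x80001234, size = 0x100) starts and ends on
 * the single page at 0x80001000, so exactly one Page_Invalidate_S is issued.
 */
static inline unsigned long r5k_sc_page_round_example(unsigned long addr)
{
return addr & ~(SC_PAGE - 1);	/* first SC_PAGE boundary at or below addr */
}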
static void r5k_sc_enable(void)
{
unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
blast_r5000_scache();
local_irq_restore(flags);
}
static void r5k_sc_disable(void)
{
unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
clear_c0_config(R5K_CONF_SE);
local_irq_restore(flags);
}
static inline int __init r5k_sc_probe(void)
{
unsigned long config = read_c0_config();
if (config & CONF_SC)
return(0);
scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
scache_size >> 10);
return 1;
}
static struct bcache_ops r5k_sc_ops = {
.bc_enable = r5k_sc_enable,
.bc_disable = r5k_sc_disable,
.bc_wback_inv = r5k_dma_cache_inv_sc,
.bc_inv = r5k_dma_cache_inv_sc
};
void r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
bcops = &r5k_sc_ops;
}
}
| gpl-2.0 |
sonicxml/Spectrum | arch/sparc/kernel/sparc_ksyms_64.c | 1276 | 1030 | /* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
struct poll {
int fd;
short events;
short revents;
};
/* from helpers.S */
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
/* from head_64.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(prom_root_node);
/* from hvcalls.S */
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
| gpl-2.0 |
aatjitra/sgs3 | arch/arm/mach-versatile/versatile_ab.c | 2812 | 1410 | /*
* linux/arch/arm/mach-versatile/versatile_ab.c
*
* Copyright (C) 2004 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "core.h"
MACHINE_START(VERSATILE_AB, "ARM-Versatile AB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
.map_io = versatile_map_io,
.init_early = versatile_init_early,
.init_irq = versatile_init_irq,
.timer = &versatile_timer,
.init_machine = versatile_init,
MACHINE_END
| gpl-2.0 |
bigbiff/android_device_samsung_mondrianwfiue | net/xfrm/xfrm_state.c | 4604 | 54689 | /*
* xfrm_state.c
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific functions
* Derek Atkins <derek@ihtfp.com>
* Add UDP Encapsulation
*
*/
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include "xfrm_hash.h"
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
2. Hash table by (daddr,family,reqid) to find what SAs exist for given
destination/tunnel endpoint. (output)
*/
static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u32 reqid,
unsigned short family)
{
return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}
static inline unsigned int xfrm_src_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
unsigned short family)
{
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
__be32 spi, u8 proto, unsigned short family)
{
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
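/*
 * Editor's note: hypothetical sketch, not part of the original file. The two
 * tables described at the top of this file are indexed through the helpers
 * above; for an IPv4 ESP SA the same xfrm_state ends up in two buckets:
 *
 *	h_out = xfrm_dst_hash(net, &daddr, &saddr, reqid, AF_INET);
 *	h_in  = xfrm_spi_hash(net, &daddr, spi, IPPROTO_ESP, AF_INET);
 *
 * Output-path resolution (xfrm_state_find) walks state_bydst + h_out, while
 * input-path lookup by SPI (__xfrm_state_lookup) walks state_byspi + h_in.
 */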
static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
struct hlist_head *nsrctable,
struct hlist_head *nspitable,
unsigned int nhashmask)
{
struct hlist_node *entry, *tmp;
struct xfrm_state *x;
hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
unsigned int h;
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
hlist_add_head(&x->bydst, ndsttable+h);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
hlist_add_head(&x->bysrc, nsrctable+h);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
hlist_add_head(&x->byspi, nspitable+h);
}
}
}
static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
unsigned int nhashmask, ohashmask;
int i;
mutex_lock(&hash_resize_mutex);
nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
ndst = xfrm_hash_alloc(nsize);
if (!ndst)
goto out_unlock;
nsrc = xfrm_hash_alloc(nsize);
if (!nsrc) {
xfrm_hash_free(ndst, nsize);
goto out_unlock;
}
nspi = xfrm_hash_alloc(nsize);
if (!nspi) {
xfrm_hash_free(ndst, nsize);
xfrm_hash_free(nsrc, nsize);
goto out_unlock;
}
spin_lock_bh(&xfrm_state_lock);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
for (i = net->xfrm.state_hmask; i >= 0; i--)
xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
nhashmask);
odst = net->xfrm.state_bydst;
osrc = net->xfrm.state_bysrc;
ospi = net->xfrm.state_byspi;
ohashmask = net->xfrm.state_hmask;
net->xfrm.state_bydst = ndst;
net->xfrm.state_bysrc = nsrc;
net->xfrm.state_byspi = nspi;
net->xfrm.state_hmask = nhashmask;
spin_unlock_bh(&xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
out_unlock:
mutex_unlock(&hash_resize_mutex);
}
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
return NULL;
write_lock_bh(&xfrm_state_afinfo_lock);
afinfo = xfrm_state_afinfo[family];
if (unlikely(!afinfo))
write_unlock_bh(&xfrm_state_afinfo_lock);
return afinfo;
}
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
__releases(xfrm_state_afinfo_lock)
{
write_unlock_bh(&xfrm_state_afinfo_lock);
}
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
const struct xfrm_type **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_map;
if (likely(typemap[type->proto] == NULL))
typemap[type->proto] = type;
else
err = -EEXIST;
xfrm_state_unlock_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_register_type);
int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
const struct xfrm_type **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_map;
if (unlikely(typemap[type->proto] != type))
err = -ENOENT;
else
typemap[type->proto] = NULL;
xfrm_state_unlock_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
struct xfrm_state_afinfo *afinfo;
const struct xfrm_type **typemap;
const struct xfrm_type *type;
int modload_attempted = 0;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
typemap = afinfo->type_map;
type = typemap[proto];
if (unlikely(type && !try_module_get(type->owner)))
type = NULL;
if (!type && !modload_attempted) {
xfrm_state_put_afinfo(afinfo);
request_module("xfrm-type-%d-%d", family, proto);
modload_attempted = 1;
goto retry;
}
xfrm_state_put_afinfo(afinfo);
return type;
}
static void xfrm_put_type(const struct xfrm_type *type)
{
module_put(type->owner);
}
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode **modemap;
int err;
if (unlikely(mode->encap >= XFRM_MODE_MAX))
return -EINVAL;
afinfo = xfrm_state_lock_afinfo(family);
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
err = -EEXIST;
modemap = afinfo->mode_map;
if (modemap[mode->encap])
goto out;
err = -ENOENT;
if (!try_module_get(afinfo->owner))
goto out;
mode->afinfo = afinfo;
modemap[mode->encap] = mode;
err = 0;
out:
xfrm_state_unlock_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode **modemap;
int err;
if (unlikely(mode->encap >= XFRM_MODE_MAX))
return -EINVAL;
afinfo = xfrm_state_lock_afinfo(family);
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
err = -ENOENT;
modemap = afinfo->mode_map;
if (likely(modemap[mode->encap] == mode)) {
modemap[mode->encap] = NULL;
module_put(mode->afinfo->owner);
err = 0;
}
xfrm_state_unlock_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode *mode;
int modload_attempted = 0;
if (unlikely(encap >= XFRM_MODE_MAX))
return NULL;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
mode = afinfo->mode_map[encap];
if (unlikely(mode && !try_module_get(mode->owner)))
mode = NULL;
if (!mode && !modload_attempted) {
xfrm_state_put_afinfo(afinfo);
request_module("xfrm-mode-%d-%d", family, encap);
modload_attempted = 1;
goto retry;
}
xfrm_state_put_afinfo(afinfo);
return mode;
}
static void xfrm_put_mode(struct xfrm_mode *mode)
{
module_put(mode->owner);
}
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aalg);
kfree(x->ealg);
kfree(x->calg);
kfree(x->encap);
kfree(x->coaddr);
kfree(x->replay_esn);
kfree(x->preplay_esn);
if (x->inner_mode)
xfrm_put_mode(x->inner_mode);
if (x->inner_mode_iaf)
xfrm_put_mode(x->inner_mode_iaf);
if (x->outer_mode)
xfrm_put_mode(x->outer_mode);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
}
security_xfrm_state_free(x);
kfree(x);
}
static void xfrm_state_gc_task(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_gc_work);
struct xfrm_state *x;
struct hlist_node *entry, *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
wake_up(&net->xfrm.km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
return MAX_SCHEDULE_TIMEOUT-1;
else
return secs*HZ;
}
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
{
struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
struct net *net = xs_net(x);
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
int err = 0;
spin_lock(&x->lock);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
if (x->km.state == XFRM_STATE_EXPIRED)
goto expired;
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (x->lft.hard_use_expires_seconds) {
long tmo = x->lft.hard_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (x->km.dying)
goto resched;
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
next = tmo;
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
next = tmo;
}
x->km.dying = warn;
if (warn)
km_state_expired(x, 0, 0);
resched:
if (next != LONG_MAX){
tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
}
goto out;
expired:
if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
x->km.state = XFRM_STATE_EXPIRED;
wake_up(&net->xfrm.km_waitq);
next = 2;
goto resched;
}
err = __xfrm_state_delete(x);
if (!err && x->id.spi)
km_state_expired(x, 1, 0);
xfrm_audit_state_delete(x, err ? 0 : 1,
audit_get_loginuid(current),
audit_get_sessionid(current), 0);
out:
spin_unlock(&x->lock);
return HRTIMER_NORESTART;
}
static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
struct xfrm_state *x;
x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
if (x) {
write_pnet(&x->xs_net, net);
atomic_set(&x->refcnt, 1);
atomic_set(&x->tunnel_users, 0);
INIT_LIST_HEAD(&x->km.all);
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
x->lft.soft_byte_limit = XFRM_INF;
x->lft.soft_packet_limit = XFRM_INF;
x->lft.hard_byte_limit = XFRM_INF;
x->lft.hard_packet_limit = XFRM_INF;
x->replay_maxage = 0;
x->replay_maxdiff = 0;
x->inner_mode = NULL;
x->inner_mode_iaf = NULL;
spin_lock_init(&x->lock);
}
return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
struct net *net = xs_net(x);
WARN_ON(x->km.state != XFRM_STATE_DEAD);
spin_lock_bh(&xfrm_state_gc_lock);
hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
schedule_work(&net->xfrm.state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
int err = -ESRCH;
if (x->km.state != XFRM_STATE_DEAD) {
x->km.state = XFRM_STATE_DEAD;
spin_lock(&xfrm_state_lock);
list_del(&x->km.all);
hlist_del(&x->bydst);
hlist_del(&x->bysrc);
if (x->id.spi)
hlist_del(&x->byspi);
net->xfrm.state_num--;
spin_unlock(&xfrm_state_lock);
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
* is what we are dropping here.
*/
xfrm_state_put(x);
err = 0;
}
return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
int err;
spin_lock_bh(&x->lock);
err = __xfrm_state_delete(x);
spin_unlock_bh(&x->lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
int i, err = 0;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct hlist_node *entry;
struct xfrm_state *x;
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
if (xfrm_id_proto_match(x->id.proto, proto) &&
(err = security_xfrm_state_delete(x)) != 0) {
xfrm_audit_state_delete(x, 0,
audit_info->loginuid,
audit_info->sessionid,
audit_info->secid);
return err;
}
}
}
return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
return 0;
}
#endif
int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
int i, err = 0, cnt = 0;
spin_lock_bh(&xfrm_state_lock);
err = xfrm_state_flush_secctx_check(net, proto, audit_info);
if (err)
goto out;
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct hlist_node *entry;
struct xfrm_state *x;
restart:
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
if (!xfrm_state_kern(x) &&
xfrm_id_proto_match(x->id.proto, proto)) {
xfrm_state_hold(x);
spin_unlock_bh(&xfrm_state_lock);
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
audit_info->loginuid,
audit_info->sessionid,
audit_info->secid);
xfrm_state_put(x);
if (!err)
cnt++;
spin_lock_bh(&xfrm_state_lock);
goto restart;
}
}
}
if (cnt)
err = 0;
out:
spin_unlock_bh(&xfrm_state_lock);
wake_up(&net->xfrm.km_waitq);
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
si->sadhcnt = net->xfrm.state_hmask;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
const struct xfrm_tmpl *tmpl,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -1;
afinfo->init_tempsel(&x->sel, fl);
if (family != tmpl->encap_family) {
xfrm_state_put_afinfo(afinfo);
afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
if (!afinfo)
return -1;
}
afinfo->init_temprop(x, tmpl, daddr, saddr);
xfrm_state_put_afinfo(afinfo);
return 0;
}
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
struct hlist_node *entry;
hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
xfrm_addr_cmp(&x->id.daddr, daddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
xfrm_state_hold(x);
return x;
}
return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
struct hlist_node *entry;
hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
xfrm_state_hold(x);
return x;
}
return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
struct net *net = xs_net(x);
u32 mark = x->mark.v & x->mark.m;
if (use_spi)
return __xfrm_state_lookup(net, mark, &x->id.daddr,
x->id.spi, x->id.proto, family);
else
return __xfrm_state_lookup_byaddr(net, mark,
&x->id.daddr,
&x->props.saddr,
x->id.proto, family);
}
static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
if (have_hash_collision &&
(net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
net->xfrm.state_num > net->xfrm.state_hmask)
schedule_work(&net->xfrm.state_hash_work);
}
static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
const struct flowi *fl, unsigned short family,
struct xfrm_state **best, int *acq_in_progress,
int *error)
{
/* Resolution logic:
* 1. There is a valid state with matching selector. Done.
* 2. Valid state with inappropriate selector. Skip.
*
* Entering area of "sysdeps".
*
* 3. If state is not valid, selector is temporary, it selects
* only session which triggered previous resolution. Key
* manager will do something to install a state with proper
* selector.
*/
if (x->km.state == XFRM_STATE_VALID) {
if ((x->sel.family &&
!xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
!security_xfrm_state_pol_flow_match(x, pol, fl))
return;
if (!*best ||
(*best)->km.dying > x->km.dying ||
((*best)->km.dying == x->km.dying &&
(*best)->curlft.add_time < x->curlft.add_time))
*best = x;
} else if (x->km.state == XFRM_STATE_ACQ) {
*acq_in_progress = 1;
} else if (x->km.state == XFRM_STATE_ERROR ||
x->km.state == XFRM_STATE_EXPIRED) {
if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
security_xfrm_state_pol_flow_match(x, pol, fl))
*error = -ESRCH;
}
}
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
const struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family)
{
static xfrm_address_t saddr_wildcard = { };
struct net *net = xp_net(pol);
unsigned int h, h_wildcard;
struct hlist_node *entry;
struct xfrm_state *x, *x0, *to_put;
int acquire_in_progress = 0;
int error = 0;
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, encap_family,
&best, &acquire_in_progress, &error);
}
if (best)
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, encap_family,
&best, &acquire_in_progress, &error);
}
found:
x = best;
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
tmpl->id.proto, encap_family)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
}
x = xfrm_state_alloc(net);
if (x == NULL) {
error = -ENOMEM;
goto out;
}
/* Initialize temporary state matching only
* to current session. */
xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
if (error) {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
goto out;
}
if (km_query(x, tmpl, pol) == 0) {
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
h = xfrm_src_hash(net, daddr, saddr, encap_family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
} else {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
error = -ESRCH;
}
}
out:
if (x)
xfrm_state_hold(x);
else
*err = acquire_in_progress ? -EAGAIN : error;
spin_unlock_bh(&xfrm_state_lock);
if (to_put)
xfrm_state_put(to_put);
return x;
}
struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark,
xfrm_address_t *daddr, xfrm_address_t *saddr,
unsigned short family, u8 mode, u8 proto, u32 reqid)
{
unsigned int h;
struct xfrm_state *rx = NULL, *x = NULL;
struct hlist_node *entry;
spin_lock(&xfrm_state_lock);
h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
mode == x->props.mode &&
proto == x->id.proto &&
x->km.state == XFRM_STATE_VALID) {
rx = x;
break;
}
}
if (rx)
xfrm_state_hold(rx);
spin_unlock(&xfrm_state_lock);
return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
static void __xfrm_state_insert(struct xfrm_state *x)
{
struct net *net = xs_net(x);
unsigned int h;
list_add(&x->km.all, &net->xfrm.state_all);
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
x->props.family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
wake_up(&net->xfrm.km_waitq);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
}
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
struct net *net = xs_net(xnew);
unsigned short family = xnew->props.family;
u32 reqid = xnew->props.reqid;
struct xfrm_state *x;
struct hlist_node *entry;
unsigned int h;
u32 mark = xnew->mark.v & xnew->mark.m;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
(mark & x->mark.m) == x->mark.v &&
!xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
!xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
x->genid++;
}
}
void xfrm_state_insert(struct xfrm_state *x)
{
spin_lock_bh(&xfrm_state_lock);
__xfrm_state_bump_genids(x);
__xfrm_state_insert(x);
spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct hlist_node *entry;
struct xfrm_state *x;
u32 mark = m->v & m->m;
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.reqid != reqid ||
x->props.mode != mode ||
x->props.family != family ||
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
x->id.proto != proto ||
(mark & x->mark.m) != x->mark.v ||
xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
xfrm_state_hold(x);
return x;
}
if (!create)
return NULL;
x = xfrm_state_alloc(net);
if (likely(x)) {
switch (family) {
case AF_INET:
x->sel.daddr.a4 = daddr->a4;
x->sel.saddr.a4 = saddr->a4;
x->sel.prefixlen_d = 32;
x->sel.prefixlen_s = 32;
x->props.saddr.a4 = saddr->a4;
x->id.daddr.a4 = daddr->a4;
break;
case AF_INET6:
*(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
*(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
x->sel.prefixlen_d = 128;
x->sel.prefixlen_s = 128;
*(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
*(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
break;
}
x->km.state = XFRM_STATE_ACQ;
x->id.proto = proto;
x->props.family = family;
x->props.mode = mode;
x->props.reqid = reqid;
x->mark.v = m->v;
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
h = xfrm_src_hash(net, daddr, saddr, family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
}
return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
struct net *net = xs_net(x);
struct xfrm_state *x1, *to_put;
int family;
int err;
u32 mark = x->mark.v & x->mark.m;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
family = x->props.family;
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, family);
if (x1) {
to_put = x1;
x1 = NULL;
err = -EEXIST;
goto out;
}
if (use_spi && x->km.seq) {
x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
if (x1 && ((x1->id.proto != x->id.proto) ||
xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
to_put = x1;
x1 = NULL;
}
}
if (use_spi && !x1)
x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
x->props.reqid, x->id.proto,
&x->id.daddr, &x->props.saddr, 0);
__xfrm_state_bump_genids(x);
__xfrm_state_insert(x);
err = 0;
out:
spin_unlock_bh(&xfrm_state_lock);
if (x1) {
xfrm_state_delete(x1);
xfrm_state_put(x1);
}
if (to_put)
xfrm_state_put(to_put);
return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
struct net *net = xs_net(orig);
int err = -ENOMEM;
struct xfrm_state *x = xfrm_state_alloc(net);
if (!x)
goto out;
memcpy(&x->id, &orig->id, sizeof(x->id));
memcpy(&x->sel, &orig->sel, sizeof(x->sel));
memcpy(&x->lft, &orig->lft, sizeof(x->lft));
x->props.mode = orig->props.mode;
x->props.replay_window = orig->props.replay_window;
x->props.reqid = orig->props.reqid;
x->props.family = orig->props.family;
x->props.saddr = orig->props.saddr;
if (orig->aalg) {
x->aalg = xfrm_algo_auth_clone(orig->aalg);
if (!x->aalg)
goto error;
}
x->props.aalgo = orig->props.aalgo;
if (orig->ealg) {
x->ealg = xfrm_algo_clone(orig->ealg);
if (!x->ealg)
goto error;
}
x->props.ealgo = orig->props.ealgo;
if (orig->calg) {
x->calg = xfrm_algo_clone(orig->calg);
if (!x->calg)
goto error;
}
x->props.calgo = orig->props.calgo;
if (orig->encap) {
x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
if (!x->encap)
goto error;
}
if (orig->coaddr) {
x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
GFP_KERNEL);
if (!x->coaddr)
goto error;
}
if (orig->replay_esn) {
err = xfrm_replay_clone(x, orig);
if (err)
goto error;
}
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
err = xfrm_init_state(x);
if (err)
goto error;
x->props.flags = orig->props.flags;
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
return x;
error:
xfrm_state_put(x);
out:
if (errp)
*errp = err;
return NULL;
}
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
unsigned int h;
struct xfrm_state *x;
struct hlist_node *entry;
if (m->reqid) {
h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
m->reqid, m->old_family);
hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
if (m->reqid && x->props.reqid != m->reqid)
continue;
if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
m->old_family) ||
xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
m->old_family))
continue;
xfrm_state_hold(x);
return x;
}
} else {
h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
m->old_family);
hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
m->old_family) ||
xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
m->old_family))
continue;
xfrm_state_hold(x);
return x;
}
}
return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m)
{
struct xfrm_state *xc;
int err;
xc = xfrm_state_clone(x, &err);
if (!xc)
return NULL;
memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
/* add state */
if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
/* a care is needed when the destination address of the
state is to be updated as it is a part of triplet */
xfrm_state_insert(xc);
} else {
if ((err = xfrm_state_add(xc)) < 0)
goto error;
}
return xc;
error:
xfrm_state_put(xc);
return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
struct xfrm_state *x1, *to_put;
int err;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
x1 = __xfrm_state_locate(x, use_spi, x->props.family);
err = -ESRCH;
if (!x1)
goto out;
if (xfrm_state_kern(x1)) {
to_put = x1;
err = -EEXIST;
goto out;
}
if (x1->km.state == XFRM_STATE_ACQ) {
__xfrm_state_insert(x);
x = NULL;
}
err = 0;
out:
spin_unlock_bh(&xfrm_state_lock);
if (to_put)
xfrm_state_put(to_put);
if (err)
return err;
if (!x) {
xfrm_state_delete(x1);
xfrm_state_put(x1);
return 0;
}
err = -EINVAL;
spin_lock_bh(&x1->lock);
if (likely(x1->km.state == XFRM_STATE_VALID)) {
if (x->encap && x1->encap)
memcpy(x1->encap, x->encap, sizeof(*x1->encap));
if (x->coaddr && x1->coaddr) {
memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
}
if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
err = 0;
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
}
spin_unlock_bh(&x1->lock);
xfrm_state_put(x1);
return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
if (!x->curlft.use_time)
x->curlft.use_time = get_seconds();
if (x->km.state != XFRM_STATE_VALID)
return -EINVAL;
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
tasklet_hrtimer_start(&x->mtimer, ktime_set(0,0), HRTIMER_MODE_REL);
return -EINVAL;
}
if (!x->km.dying &&
(x->curlft.bytes >= x->lft.soft_byte_limit ||
x->curlft.packets >= x->lft.soft_packet_limit)) {
x->km.dying = 1;
km_state_expired(x, 0, 0);
}
return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
int create, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
unsigned short family)
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_state_lock);
if (afinfo->tmpl_sort)
err = afinfo->tmpl_sort(dst, src, n);
spin_unlock_bh(&xfrm_state_lock);
xfrm_state_put_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);
int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
unsigned short family)
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_state_lock);
if (afinfo->state_sort)
err = afinfo->state_sort(dst, src, n);
spin_unlock_bh(&xfrm_state_lock);
xfrm_state_put_afinfo(afinfo);
return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm too lazy to build a resolution list */
static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
int i;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct hlist_node *entry;
struct xfrm_state *x;
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
if (x->km.seq == seq &&
(mark & x->mark.m) == x->mark.v &&
x->km.state == XFRM_STATE_ACQ) {
xfrm_state_hold(x);
return x;
}
}
}
return NULL;
}
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
x = __xfrm_find_acq_byseq(net, mark, seq);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
u32 res;
static atomic_t acqseq;
do {
res = atomic_inc_return(&acqseq);
} while (!res);
return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
struct net *net = xs_net(x);
unsigned int h;
struct xfrm_state *x0;
int err = -ENOENT;
__be32 minspi = htonl(low);
__be32 maxspi = htonl(high);
u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_DEAD)
goto unlock;
err = 0;
if (x->id.spi)
goto unlock;
err = -ENOENT;
if (minspi == maxspi) {
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
if (x0) {
xfrm_state_put(x0);
goto unlock;
}
x->id.spi = minspi;
} else {
u32 spi = 0;
for (h = 0; h < high - low + 1; h++) {
spi = low + net_random() % (high - low + 1);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
x->id.spi = htonl(spi);
break;
}
xfrm_state_put(x0);
}
}
if (x->id.spi) {
spin_lock_bh(&xfrm_state_lock);
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
spin_unlock_bh(&xfrm_state_lock);
err = 0;
}
unlock:
spin_unlock_bh(&x->lock);
return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
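/* Resumable iteration over all SAs in a namespace. walk->all keeps the
* position between calls (e.g. across netlink dump messages) and walk->seq
* counts the entries already reported; dead states and protocols that do
* not match walk->proto are skipped.
*/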
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*),
void *data)
{
struct xfrm_state *state;
struct xfrm_state_walk *x;
int err = 0;
if (walk->seq != 0 && list_empty(&walk->all))
return 0;
spin_lock_bh(&xfrm_state_lock);
if (list_empty(&walk->all))
x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
else
x = list_entry(&walk->all, struct xfrm_state_walk, all);
list_for_each_entry_from(x, &net->xfrm.state_all, all) {
if (x->state == XFRM_STATE_DEAD)
continue;
state = container_of(x, struct xfrm_state, km);
if (!xfrm_id_proto_match(state->id.proto, walk->proto))
continue;
err = func(state, walk->seq, data);
if (err) {
list_move_tail(&walk->all, &x->all);
goto out;
}
walk->seq++;
}
if (walk->seq == 0) {
err = -ENOENT;
goto out;
}
list_del_init(&walk->all);
out:
spin_unlock_bh(&xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
{
INIT_LIST_HEAD(&walk->all);
walk->proto = proto;
walk->state = XFRM_STATE_DEAD;
walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_state_walk_init);
void xfrm_state_walk_done(struct xfrm_state_walk *walk)
{
if (list_empty(&walk->all))
return;
spin_lock_bh(&xfrm_state_lock);
list_del(&walk->all);
spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);
static void xfrm_replay_timer_handler(unsigned long data)
{
struct xfrm_state *x = (struct xfrm_state *)data;
spin_lock(&x->lock);
if (x->km.state == XFRM_STATE_VALID) {
if (xfrm_aevent_is_on(xs_net(x)))
x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
else
x->xflags |= XFRM_TIME_DEFER;
}
spin_unlock(&x->lock);
}
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list)
if (km->notify_policy)
km->notify_policy(xp, dir, c);
read_unlock(&xfrm_km_lock);
}
void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list)
if (km->notify)
km->notify(x, c);
read_unlock(&xfrm_km_lock);
}
EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
struct net *net = xs_net(x);
struct km_event c;
c.data.hard = hard;
c.pid = pid;
c.event = XFRM_MSG_EXPIRE;
km_state_notify(x, &c);
if (hard)
wake_up(&net->xfrm.km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
* We send to all registered managers regardless of failure
* We are happy with one success
*/
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
int err = -EINVAL, acqret;
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
if (!acqret)
err = acqret;
}
read_unlock(&xfrm_km_lock);
return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
int err = -EINVAL;
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
if (km->new_mapping)
err = km->new_mapping(x, ipaddr, sport);
if (!err)
break;
}
read_unlock(&xfrm_km_lock);
return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
struct net *net = xp_net(pol);
struct km_event c;
c.data.hard = hard;
c.pid = pid;
c.event = XFRM_MSG_POLEXPIRE;
km_policy_notify(pol, dir, &c);
if (hard)
wake_up(&net->xfrm.km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_migrate,
const struct xfrm_kmaddress *k)
{
int err = -EINVAL;
int ret;
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
if (km->migrate) {
ret = km->migrate(sel, dir, type, m, num_migrate, k);
if (!ret)
err = ret;
}
}
read_unlock(&xfrm_km_lock);
return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
int err = -EINVAL;
int ret;
struct xfrm_mgr *km;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
if (km->report) {
ret = km->report(net, proto, sel, addr);
if (!ret)
err = ret;
}
}
read_unlock(&xfrm_km_lock);
return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
int err;
u8 *data;
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
data = kmalloc(optlen, GFP_KERNEL);
if (!data)
return -ENOMEM;
err = -EFAULT;
if (copy_from_user(data, optval, optlen))
goto out;
err = -EINVAL;
read_lock(&xfrm_km_lock);
list_for_each_entry(km, &xfrm_km_list, list) {
pol = km->compile_policy(sk, optname, data,
optlen, &err);
if (err >= 0)
break;
}
read_unlock(&xfrm_km_lock);
if (err >= 0) {
xfrm_sk_policy_insert(sk, err, pol);
xfrm_pol_put(pol);
err = 0;
}
out:
kfree(data);
return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
write_lock_bh(&xfrm_km_lock);
list_add_tail(&km->list, &xfrm_km_list);
write_unlock_bh(&xfrm_km_lock);
return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
int xfrm_unregister_km(struct xfrm_mgr *km)
{
write_lock_bh(&xfrm_km_lock);
list_del(&km->list);
write_unlock_bh(&xfrm_km_lock);
return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
write_lock_bh(&xfrm_state_afinfo_lock);
if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
err = -ENOBUFS;
else
xfrm_state_afinfo[afinfo->family] = afinfo;
write_unlock_bh(&xfrm_state_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
write_lock_bh(&xfrm_state_afinfo_lock);
if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
err = -EINVAL;
else
xfrm_state_afinfo[afinfo->family] = NULL;
}
write_unlock_bh(&xfrm_state_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
return NULL;
read_lock(&xfrm_state_afinfo_lock);
afinfo = xfrm_state_afinfo[family];
if (unlikely(!afinfo))
read_unlock(&xfrm_state_afinfo_lock);
return afinfo;
}
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
__releases(xfrm_state_afinfo_lock)
{
read_unlock(&xfrm_state_afinfo_lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
if (x->tunnel) {
struct xfrm_state *t = x->tunnel;
if (atomic_read(&t->tunnel_users) == 2)
xfrm_state_delete(t);
atomic_dec(&t->tunnel_users);
xfrm_state_put(t);
x->tunnel = NULL;
}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
int res;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_VALID &&
x->type && x->type->get_mtu)
res = x->type->get_mtu(x, mtu);
else
res = mtu - x->props.header_len;
spin_unlock_bh(&x->lock);
return res;
}
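/* Resolve everything an SA needs before it can be used: family specific
* init flags, the inner and outer transformation modes, the protocol type
* handler and, optionally, the replay state. Only when all of these
* succeed is the state marked XFRM_STATE_VALID.
*/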
int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode *inner_mode;
int family = x->props.family;
int err;
err = -EAFNOSUPPORT;
afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
goto error;
err = 0;
if (afinfo->init_flags)
err = afinfo->init_flags(x);
xfrm_state_put_afinfo(afinfo);
if (err)
goto error;
err = -EPROTONOSUPPORT;
if (x->sel.family != AF_UNSPEC) {
inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
if (inner_mode == NULL)
goto error;
if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
family != x->sel.family) {
xfrm_put_mode(inner_mode);
goto error;
}
x->inner_mode = inner_mode;
} else {
struct xfrm_mode *inner_mode_iaf;
int iafamily = AF_INET;
inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
if (inner_mode == NULL)
goto error;
if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
xfrm_put_mode(inner_mode);
goto error;
}
x->inner_mode = inner_mode;
if (x->props.family == AF_INET)
iafamily = AF_INET6;
inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
if (inner_mode_iaf) {
if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
x->inner_mode_iaf = inner_mode_iaf;
else
xfrm_put_mode(inner_mode_iaf);
}
}
x->type = xfrm_get_type(x->id.proto, family);
if (x->type == NULL)
goto error;
err = x->type->init_state(x);
if (err)
goto error;
x->outer_mode = xfrm_get_mode(x->props.mode, family);
if (x->outer_mode == NULL)
goto error;
if (init_replay) {
err = xfrm_init_replay(x);
if (err)
goto error;
}
x->km.state = XFRM_STATE_VALID;
error:
return err;
}
EXPORT_SYMBOL(__xfrm_init_state);
int xfrm_init_state(struct xfrm_state *x)
{
return __xfrm_init_state(x, true);
}
EXPORT_SYMBOL(xfrm_init_state);
int __net_init xfrm_state_init(struct net *net)
{
unsigned int sz;
INIT_LIST_HEAD(&net->xfrm.state_all);
sz = sizeof(struct hlist_head) * 8;
net->xfrm.state_bydst = xfrm_hash_alloc(sz);
if (!net->xfrm.state_bydst)
goto out_bydst;
net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
if (!net->xfrm.state_bysrc)
goto out_bysrc;
net->xfrm.state_byspi = xfrm_hash_alloc(sz);
if (!net->xfrm.state_byspi)
goto out_byspi;
net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
init_waitqueue_head(&net->xfrm.km_waitq);
return 0;
out_byspi:
xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
return -ENOMEM;
}
void xfrm_state_fini(struct net *net)
{
struct xfrm_audit audit_info;
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
audit_info.loginuid = -1;
audit_info.sessionid = -1;
audit_info.secid = 0;
xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
flush_work(&net->xfrm.state_gc_work);
WARN_ON(!list_empty(&net->xfrm.state_all));
sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(net->xfrm.state_byspi));
xfrm_hash_free(net->xfrm.state_byspi, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
xfrm_hash_free(net->xfrm.state_bysrc, sz);
WARN_ON(!hlist_empty(net->xfrm.state_bydst));
xfrm_hash_free(net->xfrm.state_bydst, sz);
}
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
struct audit_buffer *audit_buf)
{
struct xfrm_sec_ctx *ctx = x->security;
u32 spi = ntohl(x->id.spi);
if (ctx)
audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (x->props.family) {
case AF_INET:
audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
&x->props.saddr.a4, &x->id.daddr.a4);
break;
case AF_INET6:
audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
x->props.saddr.a6, x->id.daddr.a6);
break;
}
audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
struct audit_buffer *audit_buf)
{
const struct iphdr *iph4;
const struct ipv6hdr *iph6;
switch (family) {
case AF_INET:
iph4 = ip_hdr(skb);
audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
&iph4->saddr, &iph4->daddr);
break;
case AF_INET6:
iph6 = ipv6_hdr(skb);
audit_log_format(audit_buf,
" src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
&iph6->saddr, &iph6->daddr,
iph6->flow_lbl[0] & 0x0f,
iph6->flow_lbl[1],
iph6->flow_lbl[2]);
break;
}
}
void xfrm_audit_state_add(struct xfrm_state *x, int result,
uid_t auid, u32 sessionid, u32 secid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SAD-add");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
xfrm_audit_helper_sainfo(x, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
void xfrm_audit_state_delete(struct xfrm_state *x, int result,
uid_t auid, u32 sessionid, u32 secid)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SAD-delete");
if (audit_buf == NULL)
return;
xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
xfrm_audit_helper_sainfo(x, audit_buf);
audit_log_format(audit_buf, " res=%u", result);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
struct sk_buff *skb)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-replay-overflow");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
/* don't record the sequence number because it's inherent in this kind
* of audit message */
spi = ntohl(x->id.spi);
audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
void xfrm_audit_state_replay(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-replayed-pkt");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
spi = ntohl(x->id.spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
struct audit_buffer *audit_buf;
audit_buf = xfrm_audit_start("SA-notfound");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, family, audit_buf);
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
__be32 net_spi, __be32 net_seq)
{
struct audit_buffer *audit_buf;
u32 spi;
audit_buf = xfrm_audit_start("SA-notfound");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, family, audit_buf);
spi = ntohl(net_spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
void xfrm_audit_state_icvfail(struct xfrm_state *x,
struct sk_buff *skb, u8 proto)
{
struct audit_buffer *audit_buf;
__be32 net_spi;
__be32 net_seq;
audit_buf = xfrm_audit_start("SA-icv-failure");
if (audit_buf == NULL)
return;
xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
u32 spi = ntohl(net_spi);
audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
spi, spi, ntohl(net_seq));
}
audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */
| gpl-2.0 |
DeltaDroidTeam/android_kernel_lenovo_msm8x25q | sound/soc/codecs/wm9713.c | 4860 | 42340 | /*
* wm9713.c -- ALSA Soc WM9713 codec support
*
* Copyright 2006 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Features:-
*
* o Support for AC97 Codec, Voice DAC and Aux DAC
* o Support for DAPM
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include <sound/soc.h>
#include "wm9713.h"
struct wm9713_priv {
u32 pll_in; /* PLL input frequency */
};
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg);
static int ac97_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int val);
/*
* WM9713 register cache
* Reg 0x3c bit 15 is used by touch driver.
*/
static const u16 wm9713_reg[] = {
0x6174, 0x8080, 0x8080, 0x8080,
0xc880, 0xe808, 0xe808, 0x0808,
0x00da, 0x8000, 0xd600, 0xaaa0,
0xaaa0, 0xaaa0, 0x0000, 0x0000,
0x0f0f, 0x0040, 0x0000, 0x7f00,
0x0405, 0x0410, 0xbb80, 0xbb80,
0x0000, 0xbb80, 0x0000, 0x4523,
0x0000, 0x2000, 0x7eff, 0xffff,
0x0000, 0x0000, 0x0080, 0x0000,
0x0000, 0x0000, 0xfffe, 0xffff,
0x0000, 0x0000, 0x0000, 0xfffe,
0x4000, 0x0000, 0x0000, 0x0000,
0xb032, 0x3e00, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0006,
0x0001, 0x0000, 0x574d, 0x4c13,
0x0000, 0x0000, 0x0000
};
/* virtual HP mixers regs */
#define HPL_MIXER 0x80
#define HPR_MIXER 0x82
#define MICB_MUX 0x82
static const char *wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
static const char *wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
static const char *wm9713_rec_src[] =
{"Mic 1", "Mic 2", "Line", "Mono In", "Headphone", "Speaker",
"Mono Out", "Zh"};
static const char *wm9713_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
static const char *wm9713_alc_select[] = {"None", "Left", "Right", "Stereo"};
static const char *wm9713_mono_pga[] = {"Vmid", "Zh", "Mono", "Inv",
"Mono Vmid", "Inv Vmid"};
static const char *wm9713_spk_pga[] =
{"Vmid", "Zh", "Headphone", "Speaker", "Inv", "Headphone Vmid",
"Speaker Vmid", "Inv Vmid"};
static const char *wm9713_hp_pga[] = {"Vmid", "Zh", "Headphone",
"Headphone Vmid"};
static const char *wm9713_out3_pga[] = {"Vmid", "Zh", "Inv 1", "Inv 1 Vmid"};
static const char *wm9713_out4_pga[] = {"Vmid", "Zh", "Inv 2", "Inv 2 Vmid"};
static const char *wm9713_dac_inv[] =
{"Off", "Mono", "Speaker", "Left Headphone", "Right Headphone",
"Headphone Mono", "NC", "Vmid"};
static const char *wm9713_bass[] = {"Linear Control", "Adaptive Boost"};
static const char *wm9713_ng_type[] = {"Constant Gain", "Mute"};
static const char *wm9713_mic_select[] = {"Mic 1", "Mic 2 A", "Mic 2 B"};
static const char *wm9713_micb_select[] = {"MPB", "MPA"};
static const struct soc_enum wm9713_enum[] = {
SOC_ENUM_SINGLE(AC97_LINE, 3, 4, wm9713_mic_mixer), /* record mic mixer 0 */
SOC_ENUM_SINGLE(AC97_VIDEO, 14, 4, wm9713_rec_mux), /* record mux hp 1 */
SOC_ENUM_SINGLE(AC97_VIDEO, 9, 4, wm9713_rec_mux), /* record mux mono 2 */
SOC_ENUM_SINGLE(AC97_VIDEO, 3, 8, wm9713_rec_src), /* record mux left 3 */
SOC_ENUM_SINGLE(AC97_VIDEO, 0, 8, wm9713_rec_src), /* record mux right 4*/
SOC_ENUM_DOUBLE(AC97_CD, 14, 6, 2, wm9713_rec_gain), /* record step size 5 */
SOC_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9713_alc_select), /* alc source select 6*/
SOC_ENUM_SINGLE(AC97_REC_GAIN, 14, 4, wm9713_mono_pga), /* mono input select 7 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 11, 8, wm9713_spk_pga), /* speaker left input select 8 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 8, 8, wm9713_spk_pga), /* speaker right input select 9 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 6, 3, wm9713_hp_pga), /* headphone left input 10 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 4, 3, wm9713_hp_pga), /* headphone right input 11 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 2, 4, wm9713_out3_pga), /* out 3 source 12 */
SOC_ENUM_SINGLE(AC97_REC_GAIN, 0, 4, wm9713_out4_pga), /* out 4 source 13 */
SOC_ENUM_SINGLE(AC97_REC_GAIN_MIC, 13, 8, wm9713_dac_inv), /* dac invert 1 14 */
SOC_ENUM_SINGLE(AC97_REC_GAIN_MIC, 10, 8, wm9713_dac_inv), /* dac invert 2 15 */
SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_bass), /* bass control 16 */
SOC_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type), /* noise gate type 17 */
SOC_ENUM_SINGLE(AC97_3D_CONTROL, 12, 3, wm9713_mic_select), /* mic selection 18 */
SOC_ENUM_SINGLE(MICB_MUX, 0, 2, wm9713_micb_select), /* mic selection 19 */
};
static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
static const DECLARE_TLV_DB_SCALE(main_tlv, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(misc_tlv, -1500, 300, 0);
static unsigned int mic_tlv[] = {
TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0),
};
static const struct snd_kcontrol_new wm9713_snd_ac97_controls[] = {
SOC_DOUBLE_TLV("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1, out_tlv),
SOC_DOUBLE("Speaker Playback Switch", AC97_MASTER, 15, 7, 1, 1),
SOC_DOUBLE_TLV("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1,
out_tlv),
SOC_DOUBLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 7, 1, 1),
SOC_DOUBLE_TLV("Line In Volume", AC97_PC_BEEP, 8, 0, 31, 1, main_tlv),
SOC_DOUBLE_TLV("PCM Playback Volume", AC97_PHONE, 8, 0, 31, 1, main_tlv),
SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv),
SOC_SINGLE_TLV("Mic 2 Volume", AC97_MIC, 0, 31, 1, main_tlv),
SOC_SINGLE_TLV("Mic 1 Preamp Volume", AC97_3D_CONTROL, 10, 3, 0, mic_tlv),
SOC_SINGLE_TLV("Mic 2 Preamp Volume", AC97_3D_CONTROL, 12, 3, 0, mic_tlv),
SOC_SINGLE("Mic Boost (+20dB) Switch", AC97_LINE, 5, 1, 0),
SOC_SINGLE("Mic Headphone Mixer Volume", AC97_LINE, 0, 7, 1),
SOC_SINGLE("Capture Switch", AC97_CD, 15, 1, 1),
SOC_ENUM("Capture Volume Steps", wm9713_enum[5]),
SOC_DOUBLE("Capture Volume", AC97_CD, 8, 0, 31, 0),
SOC_SINGLE("Capture ZC Switch", AC97_CD, 7, 1, 0),
SOC_SINGLE_TLV("Capture to Headphone Volume", AC97_VIDEO, 11, 7, 1, misc_tlv),
SOC_SINGLE("Capture to Mono Boost (+20dB) Switch", AC97_VIDEO, 8, 1, 0),
SOC_SINGLE("Capture ADC Boost (+20dB) Switch", AC97_VIDEO, 6, 1, 0),
SOC_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
SOC_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
SOC_SINGLE("ALC Decay Time", AC97_CODEC_CLASS_REV, 4, 15, 0),
SOC_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
SOC_ENUM("ALC Function", wm9713_enum[6]),
SOC_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 0),
SOC_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 0),
SOC_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
SOC_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
SOC_ENUM("ALC NG Type", wm9713_enum[17]),
SOC_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 0),
SOC_DOUBLE("Speaker Playback ZC Switch", AC97_MASTER, 14, 6, 1, 0),
SOC_DOUBLE("Headphone Playback ZC Switch", AC97_HEADPHONE, 14, 6, 1, 0),
SOC_SINGLE("Out4 Playback Switch", AC97_MASTER_MONO, 15, 1, 1),
SOC_SINGLE("Out4 Playback ZC Switch", AC97_MASTER_MONO, 14, 1, 0),
SOC_SINGLE_TLV("Out4 Playback Volume", AC97_MASTER_MONO, 8, 31, 1, out_tlv),
SOC_SINGLE("Out3 Playback Switch", AC97_MASTER_MONO, 7, 1, 1),
SOC_SINGLE("Out3 Playback ZC Switch", AC97_MASTER_MONO, 6, 1, 0),
SOC_SINGLE_TLV("Out3 Playback Volume", AC97_MASTER_MONO, 0, 31, 1, out_tlv),
SOC_SINGLE_TLV("Mono Capture Volume", AC97_MASTER_TONE, 8, 31, 1, main_tlv),
SOC_SINGLE("Mono Playback Switch", AC97_MASTER_TONE, 7, 1, 1),
SOC_SINGLE("Mono Playback ZC Switch", AC97_MASTER_TONE, 6, 1, 0),
SOC_SINGLE_TLV("Mono Playback Volume", AC97_MASTER_TONE, 0, 31, 1, out_tlv),
SOC_SINGLE_TLV("Headphone Mixer Beep Playback Volume", AC97_AUX, 12, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Speaker Mixer Beep Playback Volume", AC97_AUX, 8, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Mono Mixer Beep Playback Volume", AC97_AUX, 4, 7, 1, misc_tlv),
SOC_SINGLE_TLV("Voice Playback Headphone Volume", AC97_PCM, 12, 7, 1,
misc_tlv),
SOC_SINGLE("Voice Playback Master Volume", AC97_PCM, 8, 7, 1),
SOC_SINGLE("Voice Playback Mono Volume", AC97_PCM, 4, 7, 1),
SOC_SINGLE_TLV("Headphone Mixer Aux Playback Volume", AC97_REC_SEL, 12, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Speaker Mixer Voice Playback Volume", AC97_PCM, 8, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Speaker Mixer Aux Playback Volume", AC97_REC_SEL, 8, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Mono Mixer Voice Playback Volume", AC97_PCM, 4, 7, 1,
misc_tlv),
SOC_SINGLE_TLV("Mono Mixer Aux Playback Volume", AC97_REC_SEL, 4, 7, 1,
misc_tlv),
SOC_SINGLE("Aux Playback Headphone Volume", AC97_REC_SEL, 12, 7, 1),
SOC_SINGLE("Aux Playback Master Volume", AC97_REC_SEL, 8, 7, 1),
SOC_ENUM("Bass Control", wm9713_enum[16]),
SOC_SINGLE("Bass Cut-off Switch", AC97_GENERAL_PURPOSE, 12, 1, 1),
SOC_SINGLE("Tone Cut-off Switch", AC97_GENERAL_PURPOSE, 4, 1, 1),
SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_GENERAL_PURPOSE, 6, 1, 0),
SOC_SINGLE("Bass Volume", AC97_GENERAL_PURPOSE, 8, 15, 1),
SOC_SINGLE("Tone Volume", AC97_GENERAL_PURPOSE, 0, 15, 1),
SOC_SINGLE("3D Upper Cut-off Switch", AC97_REC_GAIN_MIC, 5, 1, 0),
SOC_SINGLE("3D Lower Cut-off Switch", AC97_REC_GAIN_MIC, 4, 1, 0),
SOC_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
};
static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
u16 status, rate;
BUG_ON(event != SND_SOC_DAPM_PRE_PMD);
/* Gracefully shut down the voice interface. */
status = ac97_read(codec, AC97_EXTENDED_MID) | 0x1000;
rate = ac97_read(codec, AC97_HANDSET_RATE) & 0xF0FF;
ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0200);
schedule_timeout_interruptible(msecs_to_jiffies(1));
ac97_write(codec, AC97_HANDSET_RATE, rate | 0x0F00);
ac97_write(codec, AC97_EXTENDED_MID, status);
return 0;
}
/* We have to create fake left and right HP mixers because
* the codec only has a single control that is shared by both channels.
* This makes it impossible to determine the audio path using the current
* register map, thus we add a new (virtual) register to help determine the
* audio route within the device.
*/
static int mixer_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
u16 l, r, beep, tone, phone, rec, pcm, aux;
l = ac97_read(w->codec, HPL_MIXER);
r = ac97_read(w->codec, HPR_MIXER);
beep = ac97_read(w->codec, AC97_PC_BEEP);
tone = ac97_read(w->codec, AC97_MASTER_TONE);
phone = ac97_read(w->codec, AC97_PHONE);
rec = ac97_read(w->codec, AC97_REC_SEL);
pcm = ac97_read(w->codec, AC97_PCM);
aux = ac97_read(w->codec, AC97_AUX);
if (event & SND_SOC_DAPM_PRE_REG)
return 0;
if ((l & 0x1) || (r & 0x1))
ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff);
else
ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000);
if ((l & 0x2) || (r & 0x2))
ac97_write(w->codec, AC97_MASTER_TONE, tone & 0x7fff);
else
ac97_write(w->codec, AC97_MASTER_TONE, tone | 0x8000);
if ((l & 0x4) || (r & 0x4))
ac97_write(w->codec, AC97_PHONE, phone & 0x7fff);
else
ac97_write(w->codec, AC97_PHONE, phone | 0x8000);
if ((l & 0x8) || (r & 0x8))
ac97_write(w->codec, AC97_REC_SEL, rec & 0x7fff);
else
ac97_write(w->codec, AC97_REC_SEL, rec | 0x8000);
if ((l & 0x10) || (r & 0x10))
ac97_write(w->codec, AC97_PCM, pcm & 0x7fff);
else
ac97_write(w->codec, AC97_PCM, pcm | 0x8000);
if ((l & 0x20) || (r & 0x20))
ac97_write(w->codec, AC97_AUX, aux & 0x7fff);
else
ac97_write(w->codec, AC97_AUX, aux | 0x8000);
return 0;
}
/* Left Headphone Mixers */
static const struct snd_kcontrol_new wm9713_hpl_mixer_controls[] = {
SOC_DAPM_SINGLE("Beep Playback Switch", HPL_MIXER, 5, 1, 0),
SOC_DAPM_SINGLE("Voice Playback Switch", HPL_MIXER, 4, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 3, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 2, 1, 0),
SOC_DAPM_SINGLE("MonoIn Playback Switch", HPL_MIXER, 1, 1, 0),
SOC_DAPM_SINGLE("Bypass Playback Switch", HPL_MIXER, 0, 1, 0),
};
/* Right Headphone Mixers */
static const struct snd_kcontrol_new wm9713_hpr_mixer_controls[] = {
SOC_DAPM_SINGLE("Beep Playback Switch", HPR_MIXER, 5, 1, 0),
SOC_DAPM_SINGLE("Voice Playback Switch", HPR_MIXER, 4, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 3, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 2, 1, 0),
SOC_DAPM_SINGLE("MonoIn Playback Switch", HPR_MIXER, 1, 1, 0),
SOC_DAPM_SINGLE("Bypass Playback Switch", HPR_MIXER, 0, 1, 0),
};
/* headphone capture mux */
static const struct snd_kcontrol_new wm9713_hp_rec_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[1]);
/* headphone mic mux */
static const struct snd_kcontrol_new wm9713_hp_mic_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[0]);
/* Speaker Mixer */
static const struct snd_kcontrol_new wm9713_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Beep Playback Switch", AC97_AUX, 11, 1, 1),
SOC_DAPM_SINGLE("Voice Playback Switch", AC97_PCM, 11, 1, 1),
SOC_DAPM_SINGLE("Aux Playback Switch", AC97_REC_SEL, 11, 1, 1),
SOC_DAPM_SINGLE("PCM Playback Switch", AC97_PHONE, 14, 1, 1),
SOC_DAPM_SINGLE("MonoIn Playback Switch", AC97_MASTER_TONE, 14, 1, 1),
SOC_DAPM_SINGLE("Bypass Playback Switch", AC97_PC_BEEP, 14, 1, 1),
};
/* Mono Mixer */
static const struct snd_kcontrol_new wm9713_mono_mixer_controls[] = {
SOC_DAPM_SINGLE("Beep Playback Switch", AC97_AUX, 7, 1, 1),
SOC_DAPM_SINGLE("Voice Playback Switch", AC97_PCM, 7, 1, 1),
SOC_DAPM_SINGLE("Aux Playback Switch", AC97_REC_SEL, 7, 1, 1),
SOC_DAPM_SINGLE("PCM Playback Switch", AC97_PHONE, 13, 1, 1),
SOC_DAPM_SINGLE("MonoIn Playback Switch", AC97_MASTER_TONE, 13, 1, 1),
SOC_DAPM_SINGLE("Bypass Playback Switch", AC97_PC_BEEP, 13, 1, 1),
SOC_DAPM_SINGLE("Mic 1 Sidetone Switch", AC97_LINE, 7, 1, 1),
SOC_DAPM_SINGLE("Mic 2 Sidetone Switch", AC97_LINE, 6, 1, 1),
};
/* mono mic mux */
static const struct snd_kcontrol_new wm9713_mono_mic_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[2]);
/* mono output mux */
static const struct snd_kcontrol_new wm9713_mono_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[7]);
/* speaker left output mux */
static const struct snd_kcontrol_new wm9713_hp_spkl_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[8]);
/* speaker right output mux */
static const struct snd_kcontrol_new wm9713_hp_spkr_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[9]);
/* headphone left output mux */
static const struct snd_kcontrol_new wm9713_hpl_out_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[10]);
/* headphone right output mux */
static const struct snd_kcontrol_new wm9713_hpr_out_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[11]);
/* Out3 mux */
static const struct snd_kcontrol_new wm9713_out3_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[12]);
/* Out4 mux */
static const struct snd_kcontrol_new wm9713_out4_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[13]);
/* DAC inv mux 1 */
static const struct snd_kcontrol_new wm9713_dac_inv1_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[14]);
/* DAC inv mux 2 */
static const struct snd_kcontrol_new wm9713_dac_inv2_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[15]);
/* Capture source left */
static const struct snd_kcontrol_new wm9713_rec_srcl_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[3]);
/* Capture source right */
static const struct snd_kcontrol_new wm9713_rec_srcr_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[4]);
/* mic source */
static const struct snd_kcontrol_new wm9713_mic_sel_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[18]);
/* mic source B virtual control */
static const struct snd_kcontrol_new wm9713_micb_sel_mux_controls =
SOC_DAPM_ENUM("Route", wm9713_enum[19]);
static const struct snd_soc_dapm_widget wm9713_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Capture Headphone Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hp_rec_mux_controls),
SND_SOC_DAPM_MUX("Sidetone Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hp_mic_mux_controls),
SND_SOC_DAPM_MUX("Capture Mono Mux", SND_SOC_NOPM, 0, 0,
&wm9713_mono_mic_mux_controls),
SND_SOC_DAPM_MUX("Mono Out Mux", SND_SOC_NOPM, 0, 0,
&wm9713_mono_mux_controls),
SND_SOC_DAPM_MUX("Left Speaker Out Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hp_spkl_mux_controls),
SND_SOC_DAPM_MUX("Right Speaker Out Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hp_spkr_mux_controls),
SND_SOC_DAPM_MUX("Left Headphone Out Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hpl_out_mux_controls),
SND_SOC_DAPM_MUX("Right Headphone Out Mux", SND_SOC_NOPM, 0, 0,
&wm9713_hpr_out_mux_controls),
SND_SOC_DAPM_MUX("Out 3 Mux", SND_SOC_NOPM, 0, 0,
&wm9713_out3_mux_controls),
SND_SOC_DAPM_MUX("Out 4 Mux", SND_SOC_NOPM, 0, 0,
&wm9713_out4_mux_controls),
SND_SOC_DAPM_MUX("DAC Inv Mux 1", SND_SOC_NOPM, 0, 0,
&wm9713_dac_inv1_mux_controls),
SND_SOC_DAPM_MUX("DAC Inv Mux 2", SND_SOC_NOPM, 0, 0,
&wm9713_dac_inv2_mux_controls),
SND_SOC_DAPM_MUX("Left Capture Source", SND_SOC_NOPM, 0, 0,
&wm9713_rec_srcl_mux_controls),
SND_SOC_DAPM_MUX("Right Capture Source", SND_SOC_NOPM, 0, 0,
&wm9713_rec_srcr_mux_controls),
SND_SOC_DAPM_MUX("Mic A Source", SND_SOC_NOPM, 0, 0,
&wm9713_mic_sel_mux_controls),
SND_SOC_DAPM_MUX("Mic B Source", SND_SOC_NOPM, 0, 0,
&wm9713_micb_sel_mux_controls),
SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_EXTENDED_MID, 3, 1,
&wm9713_hpl_mixer_controls[0], ARRAY_SIZE(wm9713_hpl_mixer_controls),
mixer_event, SND_SOC_DAPM_POST_REG),
SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_EXTENDED_MID, 2, 1,
&wm9713_hpr_mixer_controls[0], ARRAY_SIZE(wm9713_hpr_mixer_controls),
mixer_event, SND_SOC_DAPM_POST_REG),
SND_SOC_DAPM_MIXER("Mono Mixer", AC97_EXTENDED_MID, 0, 1,
&wm9713_mono_mixer_controls[0], ARRAY_SIZE(wm9713_mono_mixer_controls)),
SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_EXTENDED_MID, 1, 1,
&wm9713_speaker_mixer_controls[0],
ARRAY_SIZE(wm9713_speaker_mixer_controls)),
SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback", AC97_EXTENDED_MID, 7, 1),
SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", AC97_EXTENDED_MID, 6, 1),
SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("HP Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Line Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Capture Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_DAC_E("Voice DAC", "Voice Playback", AC97_EXTENDED_MID, 12, 1,
wm9713_voice_shutdown, SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_DAC("Aux DAC", "Aux Playback", AC97_EXTENDED_MID, 11, 1),
SND_SOC_DAPM_PGA("Left ADC", AC97_EXTENDED_MID, 5, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right ADC", AC97_EXTENDED_MID, 4, 1, NULL, 0),
SND_SOC_DAPM_ADC("Left HiFi ADC", "Left HiFi Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_ADC("Right HiFi ADC", "Right HiFi Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_ADC("Left Voice ADC", "Left Voice Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_ADC("Right Voice ADC", "Right Voice Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_PGA("Left Headphone", AC97_EXTENDED_MSTATUS, 10, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Headphone", AC97_EXTENDED_MSTATUS, 9, 1, NULL, 0),
SND_SOC_DAPM_PGA("Left Speaker", AC97_EXTENDED_MSTATUS, 8, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Speaker", AC97_EXTENDED_MSTATUS, 7, 1, NULL, 0),
SND_SOC_DAPM_PGA("Out 3", AC97_EXTENDED_MSTATUS, 11, 1, NULL, 0),
SND_SOC_DAPM_PGA("Out 4", AC97_EXTENDED_MSTATUS, 12, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mono Out", AC97_EXTENDED_MSTATUS, 13, 1, NULL, 0),
SND_SOC_DAPM_PGA("Left Line In", AC97_EXTENDED_MSTATUS, 6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Line In", AC97_EXTENDED_MSTATUS, 5, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mono In", AC97_EXTENDED_MSTATUS, 4, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mic A PGA", AC97_EXTENDED_MSTATUS, 3, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mic B PGA", AC97_EXTENDED_MSTATUS, 2, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mic A Pre Amp", AC97_EXTENDED_MSTATUS, 1, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mic B Pre Amp", AC97_EXTENDED_MSTATUS, 0, 1, NULL, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_EXTENDED_MSTATUS, 14, 1),
SND_SOC_DAPM_OUTPUT("MONO"),
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
SND_SOC_DAPM_OUTPUT("SPKL"),
SND_SOC_DAPM_OUTPUT("SPKR"),
SND_SOC_DAPM_OUTPUT("OUT3"),
SND_SOC_DAPM_OUTPUT("OUT4"),
SND_SOC_DAPM_INPUT("LINEL"),
SND_SOC_DAPM_INPUT("LINER"),
SND_SOC_DAPM_INPUT("MONOIN"),
SND_SOC_DAPM_INPUT("PCBEEP"),
SND_SOC_DAPM_INPUT("MIC1"),
SND_SOC_DAPM_INPUT("MIC2A"),
SND_SOC_DAPM_INPUT("MIC2B"),
SND_SOC_DAPM_VMID("VMID"),
};
static const struct snd_soc_dapm_route wm9713_audio_map[] = {
/* left HP mixer */
{"Left HP Mixer", "Beep Playback Switch", "PCBEEP"},
{"Left HP Mixer", "Voice Playback Switch", "Voice DAC"},
{"Left HP Mixer", "Aux Playback Switch", "Aux DAC"},
{"Left HP Mixer", "Bypass Playback Switch", "Left Line In"},
{"Left HP Mixer", "PCM Playback Switch", "Left DAC"},
{"Left HP Mixer", "MonoIn Playback Switch", "Mono In"},
{"Left HP Mixer", NULL, "Capture Headphone Mux"},
/* right HP mixer */
{"Right HP Mixer", "Beep Playback Switch", "PCBEEP"},
{"Right HP Mixer", "Voice Playback Switch", "Voice DAC"},
{"Right HP Mixer", "Aux Playback Switch", "Aux DAC"},
{"Right HP Mixer", "Bypass Playback Switch", "Right Line In"},
{"Right HP Mixer", "PCM Playback Switch", "Right DAC"},
{"Right HP Mixer", "MonoIn Playback Switch", "Mono In"},
{"Right HP Mixer", NULL, "Capture Headphone Mux"},
/* virtual mixer - mixes left & right channels for spk and mono */
{"AC97 Mixer", NULL, "Left DAC"},
{"AC97 Mixer", NULL, "Right DAC"},
{"Line Mixer", NULL, "Right Line In"},
{"Line Mixer", NULL, "Left Line In"},
{"HP Mixer", NULL, "Left HP Mixer"},
{"HP Mixer", NULL, "Right HP Mixer"},
{"Capture Mixer", NULL, "Left Capture Source"},
{"Capture Mixer", NULL, "Right Capture Source"},
/* speaker mixer */
{"Speaker Mixer", "Beep Playback Switch", "PCBEEP"},
{"Speaker Mixer", "Voice Playback Switch", "Voice DAC"},
{"Speaker Mixer", "Aux Playback Switch", "Aux DAC"},
{"Speaker Mixer", "Bypass Playback Switch", "Line Mixer"},
{"Speaker Mixer", "PCM Playback Switch", "AC97 Mixer"},
{"Speaker Mixer", "MonoIn Playback Switch", "Mono In"},
/* mono mixer */
{"Mono Mixer", "Beep Playback Switch", "PCBEEP"},
{"Mono Mixer", "Voice Playback Switch", "Voice DAC"},
{"Mono Mixer", "Aux Playback Switch", "Aux DAC"},
{"Mono Mixer", "Bypass Playback Switch", "Line Mixer"},
{"Mono Mixer", "PCM Playback Switch", "AC97 Mixer"},
{"Mono Mixer", "Mic 1 Sidetone Switch", "Mic A PGA"},
{"Mono Mixer", "Mic 2 Sidetone Switch", "Mic B PGA"},
{"Mono Mixer", NULL, "Capture Mono Mux"},
/* DAC inv mux 1 */
{"DAC Inv Mux 1", "Mono", "Mono Mixer"},
{"DAC Inv Mux 1", "Speaker", "Speaker Mixer"},
{"DAC Inv Mux 1", "Left Headphone", "Left HP Mixer"},
{"DAC Inv Mux 1", "Right Headphone", "Right HP Mixer"},
{"DAC Inv Mux 1", "Headphone Mono", "HP Mixer"},
/* DAC inv mux 2 */
{"DAC Inv Mux 2", "Mono", "Mono Mixer"},
{"DAC Inv Mux 2", "Speaker", "Speaker Mixer"},
{"DAC Inv Mux 2", "Left Headphone", "Left HP Mixer"},
{"DAC Inv Mux 2", "Right Headphone", "Right HP Mixer"},
{"DAC Inv Mux 2", "Headphone Mono", "HP Mixer"},
/* headphone left mux */
{"Left Headphone Out Mux", "Headphone", "Left HP Mixer"},
/* headphone right mux */
{"Right Headphone Out Mux", "Headphone", "Right HP Mixer"},
/* speaker left mux */
{"Left Speaker Out Mux", "Headphone", "Left HP Mixer"},
{"Left Speaker Out Mux", "Speaker", "Speaker Mixer"},
{"Left Speaker Out Mux", "Inv", "DAC Inv Mux 1"},
/* speaker right mux */
{"Right Speaker Out Mux", "Headphone", "Right HP Mixer"},
{"Right Speaker Out Mux", "Speaker", "Speaker Mixer"},
{"Right Speaker Out Mux", "Inv", "DAC Inv Mux 2"},
/* mono mux */
{"Mono Out Mux", "Mono", "Mono Mixer"},
{"Mono Out Mux", "Inv", "DAC Inv Mux 1"},
/* out 3 mux */
{"Out 3 Mux", "Inv 1", "DAC Inv Mux 1"},
/* out 4 mux */
{"Out 4 Mux", "Inv 2", "DAC Inv Mux 2"},
/* output pga */
{"HPL", NULL, "Left Headphone"},
{"Left Headphone", NULL, "Left Headphone Out Mux"},
{"HPR", NULL, "Right Headphone"},
{"Right Headphone", NULL, "Right Headphone Out Mux"},
{"OUT3", NULL, "Out 3"},
{"Out 3", NULL, "Out 3 Mux"},
{"OUT4", NULL, "Out 4"},
{"Out 4", NULL, "Out 4 Mux"},
{"SPKL", NULL, "Left Speaker"},
{"Left Speaker", NULL, "Left Speaker Out Mux"},
{"SPKR", NULL, "Right Speaker"},
{"Right Speaker", NULL, "Right Speaker Out Mux"},
{"MONO", NULL, "Mono Out"},
{"Mono Out", NULL, "Mono Out Mux"},
/* input pga */
{"Left Line In", NULL, "LINEL"},
{"Right Line In", NULL, "LINER"},
{"Mono In", NULL, "MONOIN"},
{"Mic A PGA", NULL, "Mic A Pre Amp"},
{"Mic B PGA", NULL, "Mic B Pre Amp"},
/* left capture select */
{"Left Capture Source", "Mic 1", "Mic A Pre Amp"},
{"Left Capture Source", "Mic 2", "Mic B Pre Amp"},
{"Left Capture Source", "Line", "LINEL"},
{"Left Capture Source", "Mono In", "MONOIN"},
{"Left Capture Source", "Headphone", "Left HP Mixer"},
{"Left Capture Source", "Speaker", "Speaker Mixer"},
{"Left Capture Source", "Mono Out", "Mono Mixer"},
/* right capture select */
{"Right Capture Source", "Mic 1", "Mic A Pre Amp"},
{"Right Capture Source", "Mic 2", "Mic B Pre Amp"},
{"Right Capture Source", "Line", "LINER"},
{"Right Capture Source", "Mono In", "MONOIN"},
{"Right Capture Source", "Headphone", "Right HP Mixer"},
{"Right Capture Source", "Speaker", "Speaker Mixer"},
{"Right Capture Source", "Mono Out", "Mono Mixer"},
/* left ADC */
{"Left ADC", NULL, "Left Capture Source"},
{"Left Voice ADC", NULL, "Left ADC"},
{"Left HiFi ADC", NULL, "Left ADC"},
/* right ADC */
{"Right ADC", NULL, "Right Capture Source"},
{"Right Voice ADC", NULL, "Right ADC"},
{"Right HiFi ADC", NULL, "Right ADC"},
/* mic */
{"Mic A Pre Amp", NULL, "Mic A Source"},
{"Mic A Source", "Mic 1", "MIC1"},
{"Mic A Source", "Mic 2 A", "MIC2A"},
{"Mic A Source", "Mic 2 B", "Mic B Source"},
{"Mic B Pre Amp", "MPB", "Mic B Source"},
{"Mic B Source", NULL, "MIC2B"},
/* headphone capture */
{"Capture Headphone Mux", "Stereo", "Capture Mixer"},
{"Capture Headphone Mux", "Left", "Left Capture Source"},
{"Capture Headphone Mux", "Right", "Right Capture Source"},
/* mono capture */
{"Capture Mono Mux", "Stereo", "Capture Mixer"},
{"Capture Mono Mux", "Left", "Left Capture Source"},
{"Capture Mono Mux", "Right", "Right Capture Source"},
};
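/* Register access helpers. A few volatile registers (reset, GPIO status,
* vendor IDs, AC97_CD) are always read from the hardware; everything else
* is served from the register cache. Writes go to the hardware for real
* registers (below 0x7c) and the cache is updated for any register inside
* the cached range, so the virtual HP mixer registers live only in the
* cache.
*/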
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg)
{
u16 *cache = codec->reg_cache;
if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
reg == AC97_CD)
return soc_ac97_ops.read(codec->ac97, reg);
else {
reg = reg >> 1;
if (reg >= (ARRAY_SIZE(wm9713_reg)))
return -EIO;
return cache[reg];
}
}
static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int val)
{
u16 *cache = codec->reg_cache;
if (reg < 0x7c)
soc_ac97_ops.write(codec->ac97, reg, val);
reg = reg >> 1;
if (reg < (ARRAY_SIZE(wm9713_reg)))
cache[reg] = val;
return 0;
}
/* PLL divisors */
struct _pll_div {
u32 divsel:1;
u32 divctl:1;
u32 lf:1;
u32 n:4;
u32 k:24;
};
/* The size in bits of the PLL divide multiplied by 10
* to allow rounding later */
#define FIXED_PLL_SIZE ((1 << 22) * 10)
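/* Derive the PLL dividers for the fixed 98.304MHz output from the given
* source clock: N is the integer divide, K the 22-bit fractional part.
* Inputs above 14.4MHz are pre-divided, and sources below 8.192MHz take
* the extra low-frequency divide in the loop. K is computed in tenths so
* it can be rounded before being stored.
*/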
static void pll_factors(struct _pll_div *pll_div, unsigned int source)
{
u64 Kpart;
unsigned int K, Ndiv, Nmod, target;
/* The PLL output is always 98.304MHz. */
target = 98304000;
/* If the input frequency is over 14.4MHz then scale it down. */
if (source > 14400000) {
source >>= 1;
pll_div->divsel = 1;
if (source > 14400000) {
source >>= 1;
pll_div->divctl = 1;
} else
pll_div->divctl = 0;
} else {
pll_div->divsel = 0;
pll_div->divctl = 0;
}
/* Low frequency sources require an additional divide in the
* loop.
*/
if (source < 8192000) {
pll_div->lf = 1;
target >>= 2;
} else
pll_div->lf = 0;
Ndiv = target / source;
if ((Ndiv < 5) || (Ndiv > 12))
printk(KERN_WARNING
"WM9713 PLL N value %u out of recommended range!\n",
Ndiv);
pll_div->n = Ndiv;
Nmod = target % source;
Kpart = FIXED_PLL_SIZE * (long long)Nmod;
do_div(Kpart, source);
K = Kpart & 0xFFFFFFFF;
/* Check if we need to round */
if ((K % 10) >= 5)
K += 5;
/* Move down to proper range now rounding is done */
K /= 10;
pll_div->k = K;
}
/*
* Please note that changing the PLL input frequency may require
* resynchronisation with the AC97 controller.
*/
static int wm9713_set_pll(struct snd_soc_codec *codec,
int pll_id, unsigned int freq_in, unsigned int freq_out)
{
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
u16 reg, reg2;
struct _pll_div pll_div;
/* turn PLL off ? */
if (freq_in == 0) {
/* disable PLL power and select ext source */
reg = ac97_read(codec, AC97_HANDSET_RATE);
ac97_write(codec, AC97_HANDSET_RATE, reg | 0x0080);
reg = ac97_read(codec, AC97_EXTENDED_MID);
ac97_write(codec, AC97_EXTENDED_MID, reg | 0x0200);
wm9713->pll_in = 0;
return 0;
}
pll_factors(&pll_div, freq_in);
if (pll_div.k == 0) {
reg = (pll_div.n << 12) | (pll_div.lf << 11) |
(pll_div.divsel << 9) | (pll_div.divctl << 8);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
} else {
/* write the fractional k to the reg 0x46 pages */
reg2 = (pll_div.n << 12) | (pll_div.lf << 11) | (1 << 10) |
(pll_div.divsel << 9) | (pll_div.divctl << 8);
/* K [21:20] */
reg = reg2 | (0x5 << 4) | (pll_div.k >> 20);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
/* K [19:16] */
reg = reg2 | (0x4 << 4) | ((pll_div.k >> 16) & 0xf);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
/* K [15:12] */
reg = reg2 | (0x3 << 4) | ((pll_div.k >> 12) & 0xf);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
/* K [11:8] */
reg = reg2 | (0x2 << 4) | ((pll_div.k >> 8) & 0xf);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
/* K [7:4] */
reg = reg2 | (0x1 << 4) | ((pll_div.k >> 4) & 0xf);
ac97_write(codec, AC97_LINE1_LEVEL, reg);
reg = reg2 | (0x0 << 4) | (pll_div.k & 0xf); /* K [3:0] */
ac97_write(codec, AC97_LINE1_LEVEL, reg);
}
/* turn PLL on and select as source */
reg = ac97_read(codec, AC97_EXTENDED_MID);
ac97_write(codec, AC97_EXTENDED_MID, reg & 0xfdff);
reg = ac97_read(codec, AC97_HANDSET_RATE);
ac97_write(codec, AC97_HANDSET_RATE, reg & 0xff7f);
wm9713->pll_in = freq_in;
/* wait 10ms AC97 link frames for the link to stabilise */
schedule_timeout_interruptible(msecs_to_jiffies(10));
return 0;
}
static int wm9713_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
struct snd_soc_codec *codec = codec_dai->codec;
return wm9713_set_pll(codec, pll_id, freq_in, freq_out);
}
/*
* Tristate the PCM DAI lines; tristate can be disabled by calling
* wm9713_set_dai_fmt()
*/
static int wm9713_set_dai_tristate(struct snd_soc_dai *codec_dai,
int tristate)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg = ac97_read(codec, AC97_CENTER_LFE_MASTER) & 0x9fff;
if (tristate)
ac97_write(codec, AC97_CENTER_LFE_MASTER, reg);
return 0;
}
/*
* Configure WM9713 clock dividers.
* Voice DAC needs 256 FS
*/
static int wm9713_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
switch (div_id) {
case WM9713_PCMCLK_DIV:
reg = ac97_read(codec, AC97_HANDSET_RATE) & 0xf0ff;
ac97_write(codec, AC97_HANDSET_RATE, reg | div);
break;
case WM9713_CLKA_MULT:
reg = ac97_read(codec, AC97_HANDSET_RATE) & 0xfffd;
ac97_write(codec, AC97_HANDSET_RATE, reg | div);
break;
case WM9713_CLKB_MULT:
reg = ac97_read(codec, AC97_HANDSET_RATE) & 0xfffb;
ac97_write(codec, AC97_HANDSET_RATE, reg | div);
break;
case WM9713_HIFI_DIV:
reg = ac97_read(codec, AC97_HANDSET_RATE) & 0x8fff;
ac97_write(codec, AC97_HANDSET_RATE, reg | div);
break;
case WM9713_PCMBCLK_DIV:
reg = ac97_read(codec, AC97_CENTER_LFE_MASTER) & 0xf1ff;
ac97_write(codec, AC97_CENTER_LFE_MASTER, reg | div);
break;
case WM9713_PCMCLK_PLL_DIV:
reg = ac97_read(codec, AC97_LINE1_LEVEL) & 0xff80;
ac97_write(codec, AC97_LINE1_LEVEL, reg | 0x60 | div);
break;
case WM9713_HIFI_PLL_DIV:
reg = ac97_read(codec, AC97_LINE1_LEVEL) & 0xff80;
ac97_write(codec, AC97_LINE1_LEVEL, reg | 0x70 | div);
break;
default:
return -EINVAL;
}
return 0;
}
static int wm9713_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 gpio = ac97_read(codec, AC97_GPIO_CFG) & 0xffc5;
u16 reg = 0x8000;
/* clock masters */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
reg |= 0x4000;
gpio |= 0x0010;
break;
case SND_SOC_DAIFMT_CBM_CFS:
reg |= 0x6000;
gpio |= 0x0018;
break;
case SND_SOC_DAIFMT_CBS_CFS:
reg |= 0x2000;
gpio |= 0x001a;
break;
case SND_SOC_DAIFMT_CBS_CFM:
gpio |= 0x0012;
break;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_IB_IF:
reg |= 0x00c0;
break;
case SND_SOC_DAIFMT_IB_NF:
reg |= 0x0080;
break;
case SND_SOC_DAIFMT_NB_IF:
reg |= 0x0040;
break;
}
/* DAI format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
reg |= 0x0002;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
reg |= 0x0001;
break;
case SND_SOC_DAIFMT_DSP_A:
reg |= 0x0003;
break;
case SND_SOC_DAIFMT_DSP_B:
reg |= 0x0043;
break;
}
ac97_write(codec, AC97_GPIO_CFG, gpio);
ac97_write(codec, AC97_CENTER_LFE_MASTER, reg);
return 0;
}
static int wm9713_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
u16 reg = ac97_read(codec, AC97_CENTER_LFE_MASTER) & 0xfff3;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
reg |= 0x0004;
break;
case SNDRV_PCM_FORMAT_S24_LE:
reg |= 0x0008;
break;
case SNDRV_PCM_FORMAT_S32_LE:
reg |= 0x000c;
break;
}
/* enable PCM interface in master mode */
ac97_write(codec, AC97_CENTER_LFE_MASTER, reg);
return 0;
}
static int ac97_hifi_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct snd_pcm_runtime *runtime = substream->runtime;
int reg;
u16 vra;
vra = ac97_read(codec, AC97_EXTENDED_STATUS);
ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
reg = AC97_PCM_FRONT_DAC_RATE;
else
reg = AC97_PCM_LR_ADC_RATE;
return ac97_write(codec, reg, runtime->rate);
}
static int ac97_aux_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct snd_pcm_runtime *runtime = substream->runtime;
u16 vra, xsle;
vra = ac97_read(codec, AC97_EXTENDED_STATUS);
ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
xsle = ac97_read(codec, AC97_PCI_SID);
ac97_write(codec, AC97_PCI_SID, xsle | 0x8000);
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
return -ENODEV;
return ac97_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate);
}
#define WM9713_RATES (SNDRV_PCM_RATE_8000 | \
SNDRV_PCM_RATE_11025 | \
SNDRV_PCM_RATE_22050 | \
SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000)
#define WM9713_PCM_RATES (SNDRV_PCM_RATE_8000 | \
SNDRV_PCM_RATE_11025 | \
SNDRV_PCM_RATE_16000 | \
SNDRV_PCM_RATE_22050 | \
SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000)
#define WM9713_PCM_FORMATS \
(SNDRV_PCM_FORMAT_S16_LE | SNDRV_PCM_FORMAT_S20_3LE | \
SNDRV_PCM_FORMAT_S24_LE)
static const struct snd_soc_dai_ops wm9713_dai_ops_hifi = {
.prepare = ac97_hifi_prepare,
.set_clkdiv = wm9713_set_dai_clkdiv,
.set_pll = wm9713_set_dai_pll,
};
static const struct snd_soc_dai_ops wm9713_dai_ops_aux = {
.prepare = ac97_aux_prepare,
.set_clkdiv = wm9713_set_dai_clkdiv,
.set_pll = wm9713_set_dai_pll,
};
static const struct snd_soc_dai_ops wm9713_dai_ops_voice = {
.hw_params = wm9713_pcm_hw_params,
.set_clkdiv = wm9713_set_dai_clkdiv,
.set_pll = wm9713_set_dai_pll,
.set_fmt = wm9713_set_dai_fmt,
.set_tristate = wm9713_set_dai_tristate,
};
static struct snd_soc_dai_driver wm9713_dai[] = {
{
.name = "wm9713-hifi",
.ac97_control = 1,
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
.channels_max = 2,
.rates = WM9713_RATES,
.formats = SND_SOC_STD_AC97_FMTS,},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM9713_RATES,
.formats = SND_SOC_STD_AC97_FMTS,},
.ops = &wm9713_dai_ops_hifi,
},
{
.name = "wm9713-aux",
.playback = {
.stream_name = "Aux Playback",
.channels_min = 1,
.channels_max = 1,
.rates = WM9713_RATES,
.formats = SND_SOC_STD_AC97_FMTS,},
.ops = &wm9713_dai_ops_aux,
},
{
.name = "wm9713-voice",
.playback = {
.stream_name = "Voice Playback",
.channels_min = 1,
.channels_max = 1,
.rates = WM9713_PCM_RATES,
.formats = WM9713_PCM_FORMATS,},
.capture = {
.stream_name = "Voice Capture",
.channels_min = 1,
.channels_max = 2,
.rates = WM9713_PCM_RATES,
.formats = WM9713_PCM_FORMATS,},
.ops = &wm9713_dai_ops_voice,
.symmetric_rates = 1,
},
};
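/* Reset the codec over the AC97 link. A warm reset is tried first when
* requested and counts as successful if register 0 reads back its default
* value; otherwise a cold reset (plus an optional warm reset) is done and
* verified the same way.
*/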
int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
{
if (try_warm && soc_ac97_ops.warm_reset) {
soc_ac97_ops.warm_reset(codec->ac97);
if (ac97_read(codec, 0) == wm9713_reg[0])
return 1;
}
soc_ac97_ops.reset(codec->ac97);
if (soc_ac97_ops.warm_reset)
soc_ac97_ops.warm_reset(codec->ac97);
if (ac97_read(codec, 0) != wm9713_reg[0])
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(wm9713_reset);
static int wm9713_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 reg;
switch (level) {
case SND_SOC_BIAS_ON:
/* enable thermal shutdown */
reg = ac97_read(codec, AC97_EXTENDED_MID) & 0x1bff;
ac97_write(codec, AC97_EXTENDED_MID, reg);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
/* enable master bias and vmid */
reg = ac97_read(codec, AC97_EXTENDED_MID) & 0x3bff;
ac97_write(codec, AC97_EXTENDED_MID, reg);
ac97_write(codec, AC97_POWERDOWN, 0x0000);
break;
case SND_SOC_BIAS_OFF:
/* disable everything including AC link */
ac97_write(codec, AC97_EXTENDED_MID, 0xffff);
ac97_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
ac97_write(codec, AC97_POWERDOWN, 0xffff);
break;
}
codec->dapm.bias_level = level;
return 0;
}
static int wm9713_soc_suspend(struct snd_soc_codec *codec)
{
u16 reg;
/* Disable everything except touchpanel - that will be handled
* by the touch driver and left disabled if touch is not in
* use. */
reg = ac97_read(codec, AC97_EXTENDED_MID);
ac97_write(codec, AC97_EXTENDED_MID, reg | 0x7fff);
ac97_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
ac97_write(codec, AC97_POWERDOWN, 0x6f00);
ac97_write(codec, AC97_POWERDOWN, 0xffff);
return 0;
}
static int wm9713_soc_resume(struct snd_soc_codec *codec)
{
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
int i, ret;
u16 *cache = codec->reg_cache;
ret = wm9713_reset(codec, 1);
if (ret < 0) {
printk(KERN_ERR "could not reset AC97 codec\n");
return ret;
}
wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* do we need to re-start the PLL ? */
if (wm9713->pll_in)
wm9713_set_pll(codec, 0, wm9713->pll_in, 0);
/* only synchronise the codec if warm reset failed */
if (ret == 0) {
for (i = 2; i < ARRAY_SIZE(wm9713_reg) << 1; i += 2) {
if (i == AC97_POWERDOWN || i == AC97_EXTENDED_MID ||
i == AC97_EXTENDED_MSTATUS || i > 0x66)
continue;
soc_ac97_ops.write(codec->ac97, i, cache[i>>1]);
}
}
return ret;
}
static int wm9713_soc_probe(struct snd_soc_codec *codec)
{
struct wm9713_priv *wm9713;
int ret = 0, reg;
wm9713 = kzalloc(sizeof(struct wm9713_priv), GFP_KERNEL);
if (wm9713 == NULL)
return -ENOMEM;
snd_soc_codec_set_drvdata(codec, wm9713);
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
goto codec_err;
/* do a cold reset for the controller and then try
* a warm reset followed by an optional cold reset for codec */
wm9713_reset(codec, 0);
ret = wm9713_reset(codec, 1);
if (ret < 0) {
printk(KERN_ERR "Failed to reset WM9713: AC97 link error\n");
goto reset_err;
}
wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* unmute the adc - move to kcontrol */
reg = ac97_read(codec, AC97_CD) & 0x7fff;
ac97_write(codec, AC97_CD, reg);
snd_soc_add_codec_controls(codec, wm9713_snd_ac97_controls,
ARRAY_SIZE(wm9713_snd_ac97_controls));
return 0;
reset_err:
snd_soc_free_ac97_codec(codec);
codec_err:
kfree(wm9713);
return ret;
}
static int wm9713_soc_remove(struct snd_soc_codec *codec)
{
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
snd_soc_free_ac97_codec(codec);
kfree(wm9713);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
.probe = wm9713_soc_probe,
.remove = wm9713_soc_remove,
.suspend = wm9713_soc_suspend,
.resume = wm9713_soc_resume,
.read = ac97_read,
.write = ac97_write,
.set_bias_level = wm9713_set_bias_level,
.reg_cache_size = ARRAY_SIZE(wm9713_reg),
.reg_word_size = sizeof(u16),
.reg_cache_step = 2,
.reg_cache_default = wm9713_reg,
.dapm_widgets = wm9713_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets),
.dapm_routes = wm9713_audio_map,
.num_dapm_routes = ARRAY_SIZE(wm9713_audio_map),
};
static __devinit int wm9713_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9713, wm9713_dai, ARRAY_SIZE(wm9713_dai));
}
static int __devexit wm9713_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
}
static struct platform_driver wm9713_codec_driver = {
.driver = {
.name = "wm9713-codec",
.owner = THIS_MODULE,
},
.probe = wm9713_probe,
.remove = __devexit_p(wm9713_remove),
};
module_platform_driver(wm9713_codec_driver);
MODULE_DESCRIPTION("ASoC WM9713/WM9714 driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
| gpl-2.0 |
omegamoon/rockchip-rk30xx-mk808 | arch/um/drivers/mmapper_kern.c | 7676 | 2898 | /*
* arch/um/drivers/mmapper_kern.c
*
* BRIEF MODULE DESCRIPTION
*
* Copyright (C) 2000 RidgeRun, Inc.
* Author: RidgeRun, Inc.
* Greg Lonnon glonnon@ridgerun.com or info@ridgerun.com
*
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include "mem_user.h"
/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf;
static char *v_buf;
static ssize_t mmapper_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size);
}
static ssize_t mmapper_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (*ppos > mmapper_size)
return -EINVAL;
return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
}
static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
static int mmapper_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = -EINVAL;
int size;
if (vma->vm_pgoff != 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size > mmapper_size)
return -EFAULT;
/*
* XXX A comment above remap_pfn_range says it should only be
* called when the mm semaphore is held
*/
if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,
vma->vm_page_prot))
goto out;
ret = 0;
out:
return ret;
}
static int mmapper_open(struct inode *inode, struct file *file)
{
return 0;
}
static int mmapper_release(struct inode *inode, struct file *file)
{
return 0;
}
static const struct file_operations mmapper_fops = {
.owner = THIS_MODULE,
.read = mmapper_read,
.write = mmapper_write,
.unlocked_ioctl = mmapper_ioctl,
.mmap = mmapper_mmap,
.open = mmapper_open,
.release = mmapper_release,
.llseek = default_llseek,
};
/*
* No locking needed - only used (and modified) by below initcall and exitcall.
*/
static struct miscdevice mmapper_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mmapper",
.fops = &mmapper_fops
};
static int __init mmapper_init(void)
{
int err;
printk(KERN_INFO "Mapper v0.1\n");
v_buf = (char *) find_iomem("mmapper", &mmapper_size);
if (mmapper_size == 0) {
printk(KERN_ERR "mmapper_init - find_iomem failed\n");
return -ENODEV;
}
p_buf = __pa(v_buf);
err = misc_register(&mmapper_dev);
if (err) {
printk(KERN_ERR "mmapper - misc_register failed, err = %d\n",
err);
return err;
}
return 0;
}
static void mmapper_exit(void)
{
misc_deregister(&mmapper_dev);
}
module_init(mmapper_init);
module_exit(mmapper_exit);
MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>");
MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ChronoMonochrome/android_kernel_ste-3.4 | fs/ntfs/attrib.c | 7676 | 91838 | /**
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include "attrib.h"
#include "debug.h"
#include "layout.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#include "types.h"
/**
* ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
* @ctx: active attribute search context if present or NULL if not
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_map_runlist_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Note the runlist can be NULL after this function returns if @vcn is zero and
* the attribute has zero allocated size, i.e. there simply is no runlist.
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist will be modified.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
{
VCN end_vcn;
unsigned long flags;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
runlist_element *rl;
struct page *put_this_page = NULL;
int err = 0;
bool ctx_is_temporary, ctx_needs_reset;
ntfs_attr_search_ctx old_ctx = { NULL, };
ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
(unsigned long long)vcn);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
if (!ctx) {
ctx_is_temporary = ctx_needs_reset = true;
m = map_mft_record(base_ni);
if (IS_ERR(m))
return PTR_ERR(m);
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
} else {
VCN allocated_size_vcn;
BUG_ON(IS_ERR(ctx->mrec));
a = ctx->attr;
BUG_ON(!a->non_resident);
ctx_is_temporary = false;
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
read_lock_irqsave(&ni->size_lock, flags);
allocated_size_vcn = ni->allocated_size >>
ni->vol->cluster_size_bits;
read_unlock_irqrestore(&ni->size_lock, flags);
if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
end_vcn = allocated_size_vcn - 1;
/*
* If we already have the attribute extent containing @vcn in
* @ctx, no need to look it up again. We slightly cheat in
* that if vcn exceeds the allocated size, we will refuse to
* map the runlist below, so there is definitely no need to get
* the right attribute extent.
*/
if (vcn >= allocated_size_vcn || (a->type == ni->type &&
a->name_length == ni->name_len &&
!memcmp((u8*)a + le16_to_cpu(a->name_offset),
ni->name, ni->name_len) &&
sle64_to_cpu(a->data.non_resident.lowest_vcn)
<= vcn && end_vcn >= vcn))
ctx_needs_reset = false;
else {
/* Save the old search context. */
old_ctx = *ctx;
/*
* If the currently mapped (extent) inode is not the
* base inode we will unmap it when we reinitialize the
* search context which means we need to get a
* reference to the page containing the mapped mft
* record so we do not accidentally drop changes to the
* mft record when it has not been marked dirty yet.
*/
if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
put_this_page = old_ctx.ntfs_ino->page;
page_cache_get(put_this_page);
}
/*
* Reinitialize the search context so we can lookup the
* needed attribute extent.
*/
ntfs_attr_reinit_search_ctx(ctx);
ctx_needs_reset = true;
}
}
if (ctx_needs_reset) {
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
BUG_ON(!ctx->attr->non_resident);
}
a = ctx->attr;
/*
* Only decompress the mapping pairs if @vcn is inside it. Otherwise
* we get into problems when we try to map an out of bounds vcn because
* we then try to map the already mapped runlist fragment and
* ntfs_mapping_pairs_decompress() fails.
*/
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
if (unlikely(vcn && vcn >= end_vcn)) {
err = -ENOENT;
goto err_out;
}
rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
if (IS_ERR(rl))
err = PTR_ERR(rl);
else
ni->runlist.rl = rl;
err_out:
if (ctx_is_temporary) {
if (likely(ctx))
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
} else if (ctx_needs_reset) {
/*
* If there is no attribute list, restoring the search context
* is accomplished simply by copying the saved context back over
* the caller supplied context. If there is an attribute list,
* things are more complicated as we need to deal with mapping
* of mft records and resulting potential changes in pointers.
*/
if (NInoAttrList(base_ni)) {
/*
* If the currently mapped (extent) inode is not the
* one we had before, we need to unmap it and map the
* old one.
*/
if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
/*
* If the currently mapped inode is not the
* base inode, unmap it.
*/
if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
ctx->base_ntfs_ino) {
unmap_extent_mft_record(ctx->ntfs_ino);
ctx->mrec = ctx->base_mrec;
BUG_ON(!ctx->mrec);
}
/*
* If the old mapped inode is not the base
* inode, map it.
*/
if (old_ctx.base_ntfs_ino &&
old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
retry_map:
ctx->mrec = map_mft_record(
old_ctx.ntfs_ino);
/*
* Something bad has happened. If out
* of memory retry till it succeeds.
* Any other errors are fatal and we
* return the error code in ctx->mrec.
* Let the caller deal with it... We
* just need to fudge things so the
* caller can reinit and/or put the
* search context safely.
*/
if (IS_ERR(ctx->mrec)) {
if (PTR_ERR(ctx->mrec) ==
-ENOMEM) {
schedule();
goto retry_map;
} else
old_ctx.ntfs_ino =
old_ctx.
base_ntfs_ino;
}
}
}
/* Update the changed pointers in the saved context. */
if (ctx->mrec != old_ctx.mrec) {
if (!IS_ERR(ctx->mrec))
old_ctx.attr = (ATTR_RECORD*)(
(u8*)ctx->mrec +
((u8*)old_ctx.attr -
(u8*)old_ctx.mrec));
old_ctx.mrec = ctx->mrec;
}
}
/* Restore the search context to the saved one. */
*ctx = old_ctx;
/*
* We drop the reference on the page we took earlier. In the
* case that IS_ERR(ctx->mrec) is true this means we might lose
* some changes to the mft record that had been made between
* the last time it was marked dirty/written out and now. This
* at this stage is not a problem as the mapping error is fatal
* enough that the mft record cannot be written out anyway and
* the caller is very likely to shutdown the whole inode
* immediately and mark the volume dirty for chkdsk to pick up
* the pieces anyway.
*/
if (put_this_page)
page_cache_release(put_this_page);
}
return err;
}
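/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller following the kernel-doc of ntfs_map_runlist_nolock() when a
 * search context is supplied.  Because the call may remap mft records,
 * cached @m and @a pointers must be refreshed from @ctx afterwards and
 * IS_ERR(ctx->mrec) must be checked.  Assumes the runlist of @ni is
 * locked for writing and @ctx is an active search context of @ni.
 */
static int __maybe_unused example_remap_with_ctx(ntfs_inode *ni, const VCN vcn,
		ntfs_attr_search_ctx *ctx, MFT_RECORD **m, ATTR_RECORD **a)
{
	int err;

	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
	if (IS_ERR(ctx->mrec))
		/* @ctx is no longer usable; caller must reinit or put it. */
		return PTR_ERR(ctx->mrec);
	/* Refresh cached pointers; they may point elsewhere now. */
	*m = ctx->mrec;
	*a = ctx->attr;
	return err;
}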
/**
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Locking: - The runlist must be unlocked on entry and is unlocked on return.
* - This function takes the runlist lock for writing and may modify
* the runlist.
*/
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
int err = 0;
down_write(&ni->runlist.lock);
/* Make sure someone else didn't do the work while we were sleeping. */
if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
LCN_RL_NOT_MAPPED))
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
up_write(&ni->runlist.lock);
return err;
}
/**
* ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
* @ni: ntfs inode of the attribute whose runlist to search
* @vcn: vcn to convert
* @write_locked: true if the runlist is locked for writing
*
* Find the virtual cluster number @vcn in the runlist of the ntfs attribute
* described by the ntfs inode @ni and return the corresponding logical cluster
* number (lcn).
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @write_locked is true the caller has locked the runlist for writing and
* if false for reading.
*
* Since lcns must be >= 0, we use negative return codes with special meaning:
*
* Return code   Meaning / Description
* ==========================================
* LCN_HOLE      Hole / not allocated on disk.
* LCN_ENOENT    There is no such vcn in the runlist, i.e. @vcn is out of bounds.
* LCN_ENOMEM    Not enough memory to map runlist.
* LCN_EIO       Critical error (runlist/file is corrupt, i/o error, etc).
*
* Locking: - The runlist must be locked on entry and is left locked on return.
* - If @write_locked is 'false', i.e. the runlist is locked for reading,
* the lock may be dropped inside the function so you cannot rely on
* the runlist still being the same when this function returns.
*/
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
const bool write_locked)
{
LCN lcn;
unsigned long flags;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
ni->mft_no, (unsigned long long)vcn,
write_locked ? "write" : "read");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return LCN_ENOENT;
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
/* Convert vcn to lcn. If that fails map the runlist and retry once. */
lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
if (likely(lcn >= LCN_HOLE)) {
ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
return lcn;
}
if (lcn != LCN_RL_NOT_MAPPED) {
if (lcn != LCN_ENOENT)
lcn = LCN_EIO;
} else if (!is_retry) {
int err;
if (!write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
LCN_RL_NOT_MAPPED)) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
goto retry_remap;
}
}
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
if (!write_locked) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
}
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
if (err == -ENOENT)
lcn = LCN_ENOENT;
else if (err == -ENOMEM)
lcn = LCN_ENOMEM;
else
lcn = LCN_EIO;
}
if (lcn != LCN_ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %lli.",
(long long)lcn);
return lcn;
}
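/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * read-side caller translating the special negative LCN values documented
 * above into errno-style results.  Assumes the caller holds the runlist
 * lock of @ni for reading.
 */
static int __maybe_unused example_vcn_status(ntfs_inode *ni, const VCN vcn)
{
	LCN lcn;

	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
	if (lcn >= 0) {
		ntfs_debug("vcn 0x%llx is allocated at lcn 0x%llx.",
				(unsigned long long)vcn, (long long)lcn);
		return 0;
	}
	switch (lcn) {
	case LCN_HOLE:
		return 0;	/* Sparse run: reads see zeroes. */
	case LCN_ENOENT:
		return -ENOENT;	/* @vcn is beyond the end of the runlist. */
	case LCN_ENOMEM:
		return -ENOMEM;
	default:
		return -EIO;	/* LCN_EIO or anything unexpected. */
	}
}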
/**
* ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
* @ctx: active attribute search context if present or NULL if not
*
* Find the virtual cluster number @vcn in the runlist described by the ntfs
* inode @ni and return the address of the runlist element containing the @vcn.
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_attr_find_vcn_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
* Note you need to distinguish between the lcn of the returned runlist element
* being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
* read and allocate clusters on write.
*
* Return the runlist element containing the @vcn on success and
* ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
* to decide if the return is success or failure and PTR_ERR() to get to the
* error code if IS_ERR() is true.
*
* The possible error return codes are:
* -ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
* -ENOMEM - Not enough memory to map runlist.
* -EIO - Critical error (runlist/file is corrupt, i/o error, etc).
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist may be modified when
* needed runlist fragments need to be mapped.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
ntfs_attr_search_ctx *ctx)
{
unsigned long flags;
runlist_element *rl;
int err = 0;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return ERR_PTR(-ENOENT);
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
rl = ni->runlist.rl;
if (likely(rl && vcn >= rl[0].vcn)) {
while (likely(rl->length)) {
if (unlikely(vcn < rl[1].vcn)) {
if (likely(rl->lcn >= LCN_HOLE)) {
ntfs_debug("Done.");
return rl;
}
break;
}
rl++;
}
if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
if (likely(rl->lcn == LCN_ENOENT))
err = -ENOENT;
else
err = -EIO;
}
}
if (!err && !is_retry) {
/*
* If the search context is invalid we cannot map the unmapped
* region.
*/
if (IS_ERR(ctx->mrec))
err = PTR_ERR(ctx->mrec);
else {
/*
* The @vcn is in an unmapped region, map the runlist
* and retry.
*/
err = ntfs_map_runlist_nolock(ni, vcn, ctx);
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
}
if (err == -EINVAL)
err = -EIO;
} else if (!err)
err = -EIO;
if (err != -ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
return ERR_PTR(err);
}
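/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller of ntfs_attr_find_vcn_nolock() distinguishing an allocated run
 * from a hole, as the kernel-doc above requires.  The locking and @ctx
 * rules are the same as for ntfs_map_runlist_nolock().
 */
static int __maybe_unused example_find_vcn(ntfs_inode *ni, const VCN vcn,
		ntfs_attr_search_ctx *ctx)
{
	runlist_element *rl;

	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
	if (IS_ERR(rl))
		return PTR_ERR(rl);
	if (rl->lcn == LCN_HOLE) {
		/* Hole: return zeroes on read, allocate clusters on write. */
		return 0;
	}
	/* Allocated run: translate @vcn to its lcn within the run. */
	ntfs_debug("vcn 0x%llx maps to lcn 0x%llx.",
			(unsigned long long)vcn,
			(unsigned long long)(rl->lcn + (vcn - rl->vcn)));
	return 0;
}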
/**
* ntfs_attr_find - find (next) attribute in mft record
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* ntfs_attr_find() takes a search context @ctx as parameter and searches the
* mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
* attribute of @type, optionally @name and @val.
*
* If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
* point to the found attribute.
*
* If the attribute is not found, ntfs_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute before which the attribute being
* searched for would need to be inserted if such an action were to be desired.
*
* On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
* undefined and in particular do not rely on it not changing.
*
* If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
* is 'false', the search begins after @ctx->attr.
*
* If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
* @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
* @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
* the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
* sensitive. When @name is present, @name_len is the @name length in Unicode
* characters.
*
* If @name is not present (NULL), we assume that the unnamed attribute is
* being searched for.
*
* Finally, the resident attribute value @val is looked for, if present. If
* @val is not present (NULL), @val_len is ignored.
*
* ntfs_attr_find() only searches the specified mft record and it ignores the
* presence of an attribute list attribute (unless it is the one being searched
* for, obviously). If you need to take attribute lists into consideration,
* use ntfs_attr_lookup() instead (see below). This also means that you cannot
* use ntfs_attr_find() to search for extent records of non-resident
* attributes, as extents with lowest_vcn != 0 are usually described by the
* attribute list attribute only. - Note that it is possible that the first
* extent is only in the attribute list while the last extent is in the base
* mft record, so do not rely on being able to find the first extent in the
* base mft record.
*
* Warning: Never use @val when looking for attribute types which can be
* non-resident as this most likely will result in a crash!
*/
static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ATTR_RECORD *a;
ntfs_volume *vol = ctx->ntfs_ino->vol;
ntfschar *upcase = vol->upcase;
u32 upcase_len = vol->upcase_len;
/*
* Iterate over attributes in mft record starting at @ctx->attr, or the
* attribute following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
a = ctx->attr;
ctx->is_first = false;
} else
a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length));
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated))
break;
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
a->type == AT_END))
return -ENOENT;
if (unlikely(!a->length))
break;
if (a->type != type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
if (!name) {
/* The search failed if the found attribute is named. */
if (a->name_length)
return -ENOENT;
} else if (!ntfs_are_names_equal(name, name_len,
(ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
a->name_length, ic, upcase, upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, IGNORE_CASE,
upcase, upcase_len);
/*
* If @name collates before a->name, there is no
* matching attribute.
*/
if (rc == -1)
return -ENOENT;
/* If the strings are not equal, continue search. */
if (rc)
continue;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, CASE_SENSITIVE,
upcase, upcase_len);
if (rc == -1)
return -ENOENT;
if (rc)
continue;
}
/*
* The names match or @name not present and attribute is
* unnamed. If no @val specified, we have found the attribute
* and are done.
*/
if (!val)
return 0;
/* @val is present; compare values. */
else {
register int rc;
rc = memcmp(val, (u8*)a + le16_to_cpu(
a->data.resident.value_offset),
min_t(u32, val_len, le32_to_cpu(
a->data.resident.value_length)));
/*
* If @val collates before the current attribute's
* value, there is no matching attribute.
*/
if (!rc) {
register u32 avl;
avl = le32_to_cpu(
a->data.resident.value_length);
if (val_len == avl)
return 0;
if (val_len < avl)
return -ENOENT;
} else if (rc < 0)
return -ENOENT;
}
}
ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
NVolSetErrors(vol);
return -EIO;
}
/**
* load_attribute_list - load an attribute list into memory
* @vol: ntfs volume from which to read
* @runlist: runlist of the attribute list
* @al_start: destination buffer
* @size: size of the destination buffer in bytes
* @initialized_size: initialized size of the attribute list
*
* Walk the runlist @runlist and load all clusters from it copying them into
* the linear buffer @al. The maximum number of bytes copied to @al is @size
* bytes. Note, @size does not need to be a multiple of the cluster size. If
* @initialized_size is less than @size, the region in @al between
* @initialized_size and @size will be zeroed and not read from disk.
*
* Return 0 on success or -errno on error.
*/
int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
const s64 size, const s64 initialized_size)
{
LCN lcn;
u8 *al = al_start;
u8 *al_end = al + initialized_size;
runlist_element *rl;
struct buffer_head *bh;
struct super_block *sb;
unsigned long block_size;
unsigned long block, max_block;
int err = 0;
unsigned char block_size_bits;
ntfs_debug("Entering.");
if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
initialized_size > size)
return -EINVAL;
if (!initialized_size) {
memset(al, 0, size);
return 0;
}
sb = vol->sb;
block_size = sb->s_blocksize;
block_size_bits = sb->s_blocksize_bits;
down_read(&runlist->lock);
rl = runlist->rl;
if (!rl) {
ntfs_error(sb, "Cannot read attribute list since runlist is "
"missing.");
goto err_out;
}
/* Read all clusters specified by the runlist one run at a time. */
while (rl->length) {
lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)rl->vcn,
(unsigned long long)lcn);
/* The attribute list cannot be sparse. */
if (lcn < 0) {
ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
"read attribute list.");
goto err_out;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the run from device in chunks of block_size bytes. */
max_block = block + (rl->length << vol->cluster_size_bits >>
block_size_bits);
ntfs_debug("max_block = 0x%lx.", max_block);
do {
ntfs_debug("Reading block = 0x%lx.", block);
bh = sb_bread(sb, block);
if (!bh) {
ntfs_error(sb, "sb_bread() failed. Cannot "
"read attribute list.");
goto err_out;
}
if (al + block_size >= al_end)
goto do_final;
memcpy(al, bh->b_data, block_size);
brelse(bh);
al += block_size;
} while (++block < max_block);
rl++;
}
if (initialized_size < size) {
initialize:
memset(al_start + initialized_size, 0, size - initialized_size);
}
done:
up_read(&runlist->lock);
return err;
do_final:
if (al < al_end) {
/*
* Partial block.
*
* Note: The attribute list can be smaller than its allocation
* by multiple clusters. This has been encountered by at least
* two people running Windows XP, thus we cannot do any
* truncation sanity checking here. (AIA)
*/
memcpy(al, bh->b_data, al_end - al);
brelse(bh);
if (initialized_size < size)
goto initialize;
goto done;
}
brelse(bh);
/* Real overflow! */
ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
"is truncated.");
err_out:
err = -EIO;
goto done;
}
/**
* ntfs_external_attr_find - find an attribute in the attribute list of an inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* Find an attribute by searching the attribute list for the corresponding
* attribute list entry. Having found the entry, map the mft record if the
* attribute is in a different mft record/inode, ntfs_attr_find() the attribute
* in there and return it.
*
* On first search @ctx->ntfs_ino must be the base mft record and @ctx must
* have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
* calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
* then the base inode).
*
* After finishing with the attribute/mft record you need to call
* ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
* mapped inodes, etc).
*
* If the attribute is found, ntfs_external_attr_find() returns 0 and
* @ctx->attr will point to the found attribute. @ctx->mrec will point to the
* mft record in which @ctx->attr is located and @ctx->al_entry will point to
* the attribute list entry for the attribute.
*
* If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute in the base mft record before which
* the attribute being searched for would need to be inserted if such an action
* were to be desired. @ctx->mrec will point to the mft record in which
* @ctx->attr is located and @ctx->al_entry will point to the attribute list
* entry of the attribute before which the attribute being searched for would
* need to be inserted if such an action were to be desired.
*
* Thus to insert the not found attribute, one wants to add the attribute to
* @ctx->mrec (the base mft record) and if there is not enough space, the
* attribute should be placed in a newly allocated extent mft record. The
* attribute list entry for the inserted attribute should be inserted in the
* attribute list attribute at @ctx->al_entry.
*
* On actual error, ntfs_external_attr_find() returns -EIO. In this case
* @ctx->attr is undefined and in particular do not rely on it not changing.
*/
static int ntfs_external_attr_find(const ATTR_TYPE type,
const ntfschar *name, const u32 name_len,
const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni, *ni;
ntfs_volume *vol;
ATTR_LIST_ENTRY *al_entry, *next_al_entry;
u8 *al_start, *al_end;
ATTR_RECORD *a;
ntfschar *al_name;
u32 al_name_len;
int err = 0;
static const char *es = " Unmount and run chkdsk.";
ni = ctx->ntfs_ino;
base_ni = ctx->base_ntfs_ino;
ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
if (!base_ni) {
/* First call happens with the base mft record. */
base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
ctx->base_mrec = ctx->mrec;
}
if (ni == base_ni)
ctx->base_attr = ctx->attr;
if (type == AT_END)
goto not_found;
vol = base_ni->vol;
al_start = base_ni->attr_list;
al_end = al_start + base_ni->attr_list_size;
if (!ctx->al_entry)
ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
/*
* Iterate over entries in attribute list starting at @ctx->al_entry,
* or the entry following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
al_entry = ctx->al_entry;
ctx->is_first = false;
} else
al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
le16_to_cpu(ctx->al_entry->length));
for (;; al_entry = next_al_entry) {
/* Out of bounds check. */
if ((u8*)al_entry < base_ni->attr_list ||
(u8*)al_entry > al_end)
break; /* Inode is corrupt. */
ctx->al_entry = al_entry;
/* Catch the end of the attribute list. */
if ((u8*)al_entry == al_end)
goto not_found;
if (!al_entry->length)
break;
if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
le16_to_cpu(al_entry->length) > al_end)
break;
next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
le16_to_cpu(al_entry->length));
if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
goto not_found;
if (type != al_entry->type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
al_name_len = al_entry->name_length;
al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
if (!name) {
if (al_name_len)
goto not_found;
} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
name_len, ic, vol->upcase, vol->upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, IGNORE_CASE,
vol->upcase, vol->upcase_len);
/*
* If @name collates before al_name, there is no
* matching attribute.
*/
if (rc == -1)
goto not_found;
/* If the strings are not equal, continue search. */
if (rc)
continue;
/*
* FIXME: Reverse engineering showed 0, IGNORE_CASE but
* that is inconsistent with ntfs_attr_find(). The
* subsequent rc checks were also different. Perhaps I
* made a mistake in one of the two. Need to recheck
* which is correct or at least see what is going on...
* (AIA)
*/
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, CASE_SENSITIVE,
vol->upcase, vol->upcase_len);
if (rc == -1)
goto not_found;
if (rc)
continue;
}
/*
* The names match or @name not present and attribute is
* unnamed. Now check @lowest_vcn. Continue search if the
* next attribute list entry still fits @lowest_vcn. Otherwise
* we have reached the right one or the search has failed.
*/
if (lowest_vcn && (u8*)next_al_entry >= al_start &&
(u8*)next_al_entry + 6 < al_end &&
(u8*)next_al_entry + le16_to_cpu(
next_al_entry->length) <= al_end &&
sle64_to_cpu(next_al_entry->lowest_vcn) <=
lowest_vcn &&
next_al_entry->type == al_entry->type &&
next_al_entry->name_length == al_name_len &&
ntfs_are_names_equal((ntfschar*)((u8*)
next_al_entry +
next_al_entry->name_offset),
next_al_entry->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
continue;
if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
ntfs_error(vol->sb, "Found stale mft "
"reference in attribute list "
"of base inode 0x%lx.%s",
base_ni->mft_no, es);
err = -EIO;
break;
}
} else { /* Mft references do not match. */
/* If there is a mapped record unmap it first. */
if (ni != base_ni)
unmap_extent_mft_record(ni);
/* Do we want the base record back? */
if (MREF_LE(al_entry->mft_reference) ==
base_ni->mft_no) {
ni = ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
} else {
/* We want an extent record. */
ctx->mrec = map_extent_mft_record(base_ni,
le64_to_cpu(
al_entry->mft_reference), &ni);
if (IS_ERR(ctx->mrec)) {
ntfs_error(vol->sb, "Failed to map "
"extent mft record "
"0x%lx of base inode "
"0x%lx.%s",
MREF_LE(al_entry->
mft_reference),
base_ni->mft_no, es);
err = PTR_ERR(ctx->mrec);
if (err == -ENOENT)
err = -EIO;
/* Cause @ctx to be sanitized below. */
ni = NULL;
break;
}
ctx->ntfs_ino = ni;
}
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
}
/*
* ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
* mft record containing the attribute represented by the
* current al_entry.
*/
/*
* We could call into ntfs_attr_find() to find the right
* attribute in this mft record but this would be less
* efficient and not quite accurate as ntfs_attr_find() ignores
* the attribute instance numbers for example which become
* important when one plays with attribute lists. Also,
* because a proper match has been found in the attribute list
* entry above, the comparison can now be optimized. So it is
* worth re-implementing a simplified ntfs_attr_find() here.
*/
a = ctx->attr;
/*
* Use a manual loop so we can still use break and continue
* with the same meanings as above.
*/
do_next_attr_loop:
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated))
break;
if (a->type == AT_END)
break;
if (!a->length)
break;
if (al_entry->instance != a->instance)
goto do_next_attr;
/*
* If the type and/or the name are mismatched between the
* attribute list entry and the attribute record, there is
* corruption so we break and return error EIO.
*/
if (al_entry->type != a->type)
break;
if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)), a->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
break;
ctx->attr = a;
/*
* If no @val specified or @val specified and it matches, we
* have found it!
*/
if (!val || (!a->non_resident && le32_to_cpu(
a->data.resident.value_length) == val_len &&
!memcmp((u8*)a +
le16_to_cpu(a->data.resident.value_offset),
val, val_len))) {
ntfs_debug("Done, found.");
return 0;
}
do_next_attr:
/* Proceed to the next attribute in the current mft record. */
a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
goto do_next_attr_loop;
}
if (!err) {
ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
"attribute list attribute.%s", base_ni->mft_no,
es);
err = -EIO;
}
if (ni != base_ni) {
if (ni)
unmap_extent_mft_record(ni);
ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
ctx->attr = ctx->base_attr;
}
if (err != -ENOMEM)
NVolSetErrors(vol);
return err;
not_found:
/*
* If we were looking for AT_END, we reset the search context @ctx and
* use ntfs_attr_find() to seek to the end of the base mft record.
*/
if (type == AT_END) {
ntfs_attr_reinit_search_ctx(ctx);
return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
ctx);
}
/*
* The attribute was not found. Before we return, we want to ensure
* @ctx->mrec and @ctx->attr indicate the position at which the
* attribute should be inserted in the base mft record. Since we also
* want to preserve @ctx->al_entry we cannot reinitialize the search
* context using ntfs_attr_reinit_search_ctx() as this would set
* @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
* ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
* @ctx->al_entry as the remaining fields (base_*) are identical to
* their non base_ counterparts and we cannot set @ctx->base_attr
* correctly yet as we do not know what @ctx->attr will be set to by
* the call to ntfs_attr_find() below.
*/
if (ni != base_ni)
unmap_extent_mft_record(ni);
ctx->mrec = ctx->base_mrec;
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
ctx->is_first = true;
ctx->ntfs_ino = base_ni;
ctx->base_ntfs_ino = NULL;
ctx->base_mrec = NULL;
ctx->base_attr = NULL;
/*
* In case there are multiple matches in the base mft record, need to
* keep enumerating until we get an attribute not found response (or
* another error), otherwise we would keep returning the same attribute
* over and over again and all programs using us for enumeration would
* lock up in a tight loop.
*/
do {
err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
} while (!err);
ntfs_debug("Done, not found.");
return err;
}
/**
* ntfs_attr_lookup - find an attribute in an ntfs inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must
* be the base mft record and @ctx must have been obtained from a call to
* ntfs_attr_get_search_ctx().
*
* This function transparently handles attribute lists and @ctx is used to
* continue searches where they were left off at.
*
* After finishing with the attribute/mft record you need to call
* ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
* mapped inodes, etc).
*
* Return 0 if the search was successful and -errno if not.
*
* When 0, @ctx->attr is the found attribute and it is in mft record
* @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is
* the attribute list entry of the found attribute.
*
* When -ENOENT, @ctx->attr is the attribute which collates just after the
* attribute being searched for, i.e. if one wants to add the attribute to the
* mft record this is the correct place to insert it into. If an attribute
* list attribute is present, @ctx->al_entry is the attribute list entry which
* collates just after the attribute list entry of the attribute being searched
* for, i.e. if one wants to add the attribute to the mft record this is the
* correct place to insert its attribute list entry into.
*
* When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
* then undefined and in particular you should not rely on it not changing.
*/
int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const VCN lowest_vcn, const u8 *val, const u32 val_len,
ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni;
ntfs_debug("Entering.");
BUG_ON(IS_ERR(ctx->mrec));
if (ctx->base_ntfs_ino)
base_ni = ctx->base_ntfs_ino;
else
base_ni = ctx->ntfs_ino;
/* Sanity check, just for debugging really. */
BUG_ON(!base_ni);
if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
return ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
val, val_len, ctx);
}
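/*
 * Illustrative sketch, not part of the original file: the usual lookup
 * pattern described above, mirroring the way this file itself uses the
 * search context API further down.  @base_ni is the base inode and @ni
 * describes the attribute to find (its type, name and name length).
 */
static int __maybe_unused example_lookup_attr(ntfs_inode *base_ni,
		ntfs_inode *ni)
{
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	int err;

	m = map_mft_record(base_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unmap_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (!err) {
		/* ctx->attr and ctx->mrec now describe the found attribute. */
		ntfs_debug("Found attribute type 0x%x.",
				le32_to_cpu(ni->type));
	}
	ntfs_attr_put_search_ctx(ctx);
unmap_out:
	unmap_mft_record(base_ni);
	return err;
}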
/**
* ntfs_attr_init_search_ctx - initialize an attribute search context
* @ctx: attribute search context to initialize
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Initialize the attribute search context @ctx with @ni and @mrec.
*/
static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
ntfs_inode *ni, MFT_RECORD *mrec)
{
*ctx = (ntfs_attr_search_ctx) {
.mrec = mrec,
/* Sanity checks are performed elsewhere. */
.attr = (ATTR_RECORD*)((u8*)mrec +
le16_to_cpu(mrec->attrs_offset)),
.is_first = true,
.ntfs_ino = ni,
};
}
/**
* ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
* @ctx: attribute search context to reinitialize
*
* Reinitialize the attribute search context @ctx, unmapping an associated
* extent mft record if present, and initialize the search context again.
*
* This is used when a search for a new attribute is being started to reset
* the search context to the beginning.
*/
void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (likely(!ctx->base_ntfs_ino)) {
/* No attribute list. */
ctx->is_first = true;
/* Sanity checks are performed elsewhere. */
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
/*
* This needs resetting due to ntfs_external_attr_find() which
* can leave it set despite having zeroed ctx->base_ntfs_ino.
*/
ctx->al_entry = NULL;
return;
} /* Attribute list. */
if (ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
return;
}
/**
* ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Allocate a new attribute search context, initialize it with @ni and @mrec,
* and return it. Return NULL if allocation failed.
*/
ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
{
ntfs_attr_search_ctx *ctx;
ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
if (ctx)
ntfs_attr_init_search_ctx(ctx, ni, mrec);
return ctx;
}
/**
* ntfs_attr_put_search_ctx - release an attribute search context
* @ctx: attribute search context to free
*
* Release the attribute search context @ctx, unmapping an associated extent
* mft record if present.
*/
void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
kmem_cache_free(ntfs_attr_ctx_cache, ctx);
return;
}
#ifdef NTFS_RW
/**
* ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to find
*
* Search for the attribute definition record corresponding to the attribute
* @type in the $AttrDef system file.
*
* Return the attribute type definition record if found and NULL if not found.
*/
static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
const ATTR_TYPE type)
{
ATTR_DEF *ad;
BUG_ON(!vol->attrdef);
BUG_ON(!type);
for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
vol->attrdef_size && ad->type; ++ad) {
/* We have not found it yet, carry on searching. */
if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
continue;
/* We found the attribute; return it. */
if (likely(ad->type == type))
return ad;
/* We have gone too far already. No point in continuing. */
break;
}
/* Attribute not found. */
ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
le32_to_cpu(type));
return NULL;
}
/**
* ntfs_attr_size_bounds_check - check a size of an attribute type for validity
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
* @size: size which to check
*
* Check whether the @size in bytes is valid for an attribute of @type on the
* ntfs volume @vol. This information is obtained from $AttrDef system file.
*
* Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
* listed in $AttrDef.
*/
int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
const s64 size)
{
ATTR_DEF *ad;
BUG_ON(size < 0);
/*
* $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
* listed in $AttrDef.
*/
if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
return -ERANGE;
/* Get the $AttrDef entry for the attribute @type. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Do the bounds check. */
if (((sle64_to_cpu(ad->min_size) > 0) &&
size < sle64_to_cpu(ad->min_size)) ||
((sle64_to_cpu(ad->max_size) > 0) && size >
sle64_to_cpu(ad->max_size)))
return -ERANGE;
return 0;
}
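/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * check of a proposed new attribute size against $AttrDef before a
 * resize, mapping the documented return codes onto the conventions used
 * elsewhere in this file.
 */
static int __maybe_unused example_check_new_size(ntfs_inode *ni,
		const s64 new_size)
{
	int err;

	err = ntfs_attr_size_bounds_check(ni->vol, ni->type, new_size);
	if (err == -ERANGE)
		ntfs_error(ni->vol->sb, "Size %lli is out of bounds for "
				"attribute type 0x%x.",
				(long long)new_size, le32_to_cpu(ni->type));
	else if (err == -ENOENT)
		err = -EIO;	/* Type not in $AttrDef: treat as corruption. */
	return err;
}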
/**
* ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be non-resident. This information is obtained from $AttrDef system file.
*
* Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
* -ENOENT if the attribute is not listed in $AttrDef.
*/
int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
ATTR_DEF *ad;
/* Find the attribute definition record in $AttrDef. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Check the flags and return the result. */
if (ad->flags & ATTR_DEF_RESIDENT)
return -EPERM;
return 0;
}
/**
* ntfs_attr_can_be_resident - check if an attribute can be resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be resident. This information is derived from our ntfs knowledge and may
* not be completely accurate, especially when user defined attributes are
* present. Basically we allow everything to be resident except for index
* allocation and $EA attributes.
*
* Return 0 if the attribute is allowed to be non-resident and -EPERM if not.
*
* Warning: In the system file $MFT the attribute $Bitmap must be non-resident
* otherwise windows will not boot (blue screen of death)! We cannot
* check for this here as we do not know which inode's $Bitmap is
* being asked about so the caller needs to special case this.
*/
int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
if (type == AT_INDEX_ALLOCATION)
return -EPERM;
return 0;
}
/**
* ntfs_attr_record_resize - resize an attribute record
* @m: mft record containing attribute record
* @a: attribute record to resize
* @new_size: new size in bytes to which to resize the attribute record @a
*
* Resize the attribute record @a, i.e. the resident part of the attribute, in
* the mft record @m to @new_size bytes.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
* Warning: If you make a record smaller without having copied all the data you
* are interested in, the data may be overwritten.
*/
int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
{
ntfs_debug("Entering for new_size %u.", new_size);
/* Align to 8 bytes if it is not already done. */
if (new_size & 7)
new_size = (new_size + 7) & ~7;
/* If the actual attribute length has changed, move things around. */
if (new_size != le32_to_cpu(a->length)) {
u32 new_muse = le32_to_cpu(m->bytes_in_use) -
le32_to_cpu(a->length) + new_size;
/* Not enough space in this mft record. */
if (new_muse > le32_to_cpu(m->bytes_allocated))
return -ENOSPC;
/* Move attributes following @a to their new location. */
memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
le32_to_cpu(m->bytes_in_use) - ((u8*)a -
(u8*)m) - le32_to_cpu(a->length));
/* Adjust @m to reflect the change in used space. */
m->bytes_in_use = cpu_to_le32(new_muse);
/* Adjust @a to reflect the new size. */
if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
a->length = cpu_to_le32(new_size);
}
return 0;
}
/**
* ntfs_resident_attr_value_resize - resize the value of a resident attribute
* @m: mft record containing attribute record
* @a: attribute record whose value to resize
* @new_size: new size in bytes to which to resize the attribute value of @a
*
* Resize the value of the attribute @a in the mft record @m to @new_size bytes.
* If the value is made bigger, the newly allocated space is cleared.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
* Warning: If you make a record smaller without having copied all the data you
* are interested in, the data may be overwritten.
*/
int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
const u32 new_size)
{
u32 old_size;
/* Resize the resident part of the attribute record. */
if (ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) + new_size))
return -ENOSPC;
/*
* The resize succeeded! If we made the attribute value bigger, clear
* the area between the old size and @new_size.
*/
old_size = le32_to_cpu(a->data.resident.value_length);
if (new_size > old_size)
memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
old_size, 0, new_size - old_size);
/* Finally update the length of the attribute value. */
a->data.resident.value_length = cpu_to_le32(new_size);
return 0;
}
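/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * helper that grows the value of a resident attribute and then marks the
 * mft record dirty, as the surrounding code does after modifying records.
 * Assumes @ctx is positioned on a resident attribute in a mapped mft
 * record.
 */
static int __maybe_unused example_grow_resident_value(
		ntfs_attr_search_ctx *ctx, const u32 new_size)
{
	int err;

	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, new_size);
	if (err)
		return err;	/* -ENOSPC: the mft record is too full. */
	/* Newly added bytes are already zeroed; write the change back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	return 0;
}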
/**
* ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
* @ni: ntfs inode describing the attribute to convert
* @data_size: size of the resident data to copy to the non-resident attribute
*
* Convert the resident ntfs attribute described by the ntfs inode @ni to a
* non-resident one.
*
* @data_size must be equal to the attribute value size. This is needed since
* we need to know the size before we can map the mft record and our callers
* always know it. The reason we cannot simply read the size from the vfs
* inode i_size is that this is not necessarily uptodate. This happens when
* ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
*
* Return 0 on success and -errno on error. The following error return codes
* are defined:
* -EPERM - The attribute is not allowed to be non-resident.
* -ENOMEM - Not enough memory.
* -ENOSPC - Not enough disk space.
* -EINVAL - Attribute not defined on the volume.
* -EIO - I/o error or other error.
* Note that -ENOSPC is also returned in the case that there is not enough
* space in the mft record to do the conversion. This can happen when the mft
* record is already very full. The caller is responsible for trying to make
* space in the mft record and trying again. FIXME: Do we need a separate
* error return code for this kind of -ENOSPC or is it always worth trying
* again in case the attribute may then fit in a resident state so no need to
* make it non-resident at all? Ho-hum... (AIA)
*
* NOTE to self: No changes in the attribute list are required to move from
* a resident to a non-resident attribute.
*
* Locking: - The caller must hold i_mutex on the inode.
*/
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
s64 new_size;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
struct page *page;
runlist_element *rl;
u8 *kaddr;
unsigned long flags;
int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
u32 attr_size;
u8 old_res_attr_flags;
/* Check that the attribute is allowed to be non-resident. */
err = ntfs_attr_can_be_non_resident(vol, ni->type);
if (unlikely(err)) {
if (err == -EPERM)
ntfs_debug("Attribute is not allowed to be "
"non-resident.");
else
ntfs_debug("Attribute not defined on the NTFS "
"volume!");
return err;
}
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
/*
* The size needs to be aligned to a cluster boundary for allocation
* purposes.
*/
new_size = (data_size + vol->cluster_size - 1) &
~(vol->cluster_size - 1);
if (new_size > 0) {
/*
* Will need the page later and since the page lock nests
* outside all ntfs locks, we need to get the page now.
*/
page = find_or_create_page(vi->i_mapping, 0,
mapping_gfp_mask(vi->i_mapping));
if (unlikely(!page))
return -ENOMEM;
/* Start by allocating clusters to hold the attribute value. */
rl = ntfs_cluster_alloc(vol, 0, new_size >>
vol->cluster_size_bits, -1, DATA_ZONE, true);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
ntfs_debug("Failed to allocate cluster%s, error code "
"%i.", (new_size >>
vol->cluster_size_bits) > 1 ? "s" : "",
err);
goto page_err_out;
}
} else {
rl = NULL;
page = NULL;
}
/* Determine the size of the mapping pairs array. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
if (unlikely(mp_size < 0)) {
err = mp_size;
ntfs_debug("Failed to get size for mapping pairs array, error "
"code %i.", err);
goto rl_err_out;
}
down_write(&ni->runlist.lock);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(NInoNonResident(ni));
BUG_ON(a->non_resident);
/*
* Calculate new offsets for the name and the mapping pairs array.
*/
if (NInoSparse(ni) || NInoCompressed(ni))
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) +
sizeof(a->data.non_resident.compressed_size) +
7) & ~7;
else
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) + 7) & ~7;
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
/*
* Determine the size of the resident part of the now non-resident
* attribute record.
*/
arec_size = (mp_ofs + mp_size + 7) & ~7;
/*
* If the page is not uptodate bring it uptodate by copying from the
* attribute value.
*/
attr_size = le32_to_cpu(a->data.resident.value_length);
BUG_ON(attr_size != data_size);
if (page && !PageUptodate(page)) {
kaddr = kmap_atomic(page);
memcpy(kaddr, (u8*)a +
le16_to_cpu(a->data.resident.value_offset),
attr_size);
memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
}
/* Backup the attribute flag. */
old_res_attr_flags = a->data.resident.flags;
/* Resize the resident part of the attribute record. */
err = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err))
goto err_out;
/*
* Convert the resident part of the attribute record to describe a
* non-resident attribute.
*/
a->non_resident = 1;
/* Move the attribute name if it exists and update the offset. */
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
a->name_offset = cpu_to_le16(name_ofs);
/* Setup the fields specific to non-resident attributes. */
a->data.non_resident.lowest_vcn = 0;
a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
vol->cluster_size_bits);
a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
memset(&a->data.non_resident.reserved, 0,
sizeof(a->data.non_resident.reserved));
a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
a->data.non_resident.data_size =
a->data.non_resident.initialized_size =
cpu_to_sle64(attr_size);
if (NInoSparse(ni) || NInoCompressed(ni)) {
a->data.non_resident.compression_unit = 0;
if (NInoCompressed(ni) || vol->major_ver < 3)
a->data.non_resident.compression_unit = 4;
a->data.non_resident.compressed_size =
a->data.non_resident.allocated_size;
} else
a->data.non_resident.compression_unit = 0;
/* Generate the mapping pairs array into the attribute record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
arec_size - mp_ofs, rl, 0, -1, NULL);
if (unlikely(err)) {
ntfs_debug("Failed to build mapping pairs, error code %i.",
err);
goto undo_err_out;
}
/* Setup the in-memory attribute structure to be non-resident. */
ni->runlist.rl = rl;
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_size;
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size = ni->allocated_size;
if (a->data.non_resident.compression_unit) {
ni->itype.compressed.block_size = 1U << (a->data.
non_resident.compression_unit +
vol->cluster_size_bits);
ni->itype.compressed.block_size_bits =
ffs(ni->itype.compressed.block_size) -
1;
ni->itype.compressed.block_clusters = 1U <<
a->data.non_resident.compression_unit;
} else {
ni->itype.compressed.block_size = 0;
ni->itype.compressed.block_size_bits = 0;
ni->itype.compressed.block_clusters = 0;
}
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = ni->allocated_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
/*
* This needs to be last since the address space operations ->readpage
* and ->writepage can run concurrently with us as they are not
* serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
*/
NInoSetNonResident(ni);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
if (page) {
set_page_dirty(page);
unlock_page(page);
mark_page_accessed(page);
page_cache_release(page);
}
ntfs_debug("Done.");
return 0;
undo_err_out:
/* Convert the attribute back into a resident attribute. */
a->non_resident = 0;
/* Move the attribute name if it exists and update the offset. */
name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
sizeof(a->data.resident.reserved) + 7) & ~7;
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
a->name_offset = cpu_to_le16(name_ofs);
arec_size = (mp_ofs + attr_size + 7) & ~7;
/* Resize the resident part of the attribute record. */
err2 = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err2)) {
/*
* This cannot happen (well if memory corruption is at work it
* could happen in theory), but deal with it as well as we can.
* If the old size is too small, truncate the attribute,
* otherwise simply give it a larger allocated size.
* FIXME: Should check whether chkdsk complains when the
* allocated size is much bigger than the resident value size.
*/
arec_size = le32_to_cpu(a->length);
if ((mp_ofs + attr_size) > arec_size) {
err2 = attr_size;
attr_size = arec_size - mp_ofs;
ntfs_error(vol->sb, "Failed to undo partial resident "
"to non-resident attribute "
"conversion. Truncating inode 0x%lx, "
"attribute type 0x%x from %i bytes to "
"%i bytes to maintain metadata "
"consistency. THIS MEANS YOU ARE "
"LOSING %i BYTES DATA FROM THIS %s.",
vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err2, attr_size, err2 - attr_size,
((ni->type == AT_DATA) &&
!ni->name_len) ? "FILE": "ATTRIBUTE");
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = attr_size;
i_size_write(vi, attr_size);
write_unlock_irqrestore(&ni->size_lock, flags);
}
}
/* Setup the fields specific to resident attributes. */
a->data.resident.value_length = cpu_to_le32(attr_size);
a->data.resident.value_offset = cpu_to_le16(mp_ofs);
a->data.resident.flags = old_res_attr_flags;
memset(&a->data.resident.reserved, 0,
sizeof(a->data.resident.reserved));
/* Copy the data from the page back to the attribute value. */
if (page) {
kaddr = kmap_atomic(page);
memcpy((u8*)a + mp_ofs, kaddr, attr_size);
kunmap_atomic(kaddr);
}
/* Setup the allocated size in the ntfs inode in case it changed. */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = arec_size - mp_ofs;
write_unlock_irqrestore(&ni->size_lock, flags);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ni->runlist.rl = NULL;
up_write(&ni->runlist.lock);
rl_err_out:
if (rl) {
if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl);
page_err_out:
unlock_page(page);
page_cache_release(page);
}
if (err == -EINVAL)
err = -EIO;
return err;
}
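#if 0 /* Illustrative sketch only -- not part of the driver. */
/*
 * A minimal, hypothetical caller following the convention documented in the
 * header comment of ntfs_attr_make_non_resident() above: the caller must
 * hold i_mutex on the vfs inode of @ni and, on -ENOSPC, is expected to try
 * to make space in the mft record before retrying. How that space is made
 * is not shown here; see ntfs_attr_extend_allocation() below for the real
 * in-tree caller.
 */
static int example_go_non_resident(ntfs_inode *ni, const u32 data_size)
{
int err;

err = ntfs_attr_make_non_resident(ni, data_size);
if (err == -ENOSPC) {
/* Hypothetical: make space in the mft record here, then retry. */
err = ntfs_attr_make_non_resident(ni, data_size);
}
return err;
}
#endif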
/**
* ntfs_attr_extend_allocation - extend the allocated space of an attribute
* @ni: ntfs inode of the attribute whose allocation to extend
* @new_alloc_size: new size in bytes to which to extend the allocation to
* @new_data_size: new size in bytes to which to extend the data to
* @data_start: beginning of region which is required to be non-sparse
*
* Extend the allocated space of an attribute described by the ntfs inode @ni
* to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
* implemented as a hole in the file (as long as both the volume and the ntfs
* inode @ni have sparse support enabled). If @data_start is >= 0, then the
* region between the old allocated size and @data_start - 1 may be made sparse
* but the region between @data_start and @new_alloc_size must be backed by
* actual clusters.
*
* If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
* of the attribute is extended to @new_data_size. Note that the i_size of the
* vfs inode is not updated. Only the data size in the base attribute record
* is updated. The caller has to update i_size separately if this is required.
* WARNING: It is a BUG() for @new_data_size to be smaller than the old data
* size as well as for @new_data_size to be greater than @new_alloc_size.
*
* For resident attributes this involves resizing the attribute record and if
* necessary moving it and/or other attributes into extent mft records and/or
* converting the attribute to a non-resident attribute which in turn involves
* extending the allocation of a non-resident attribute as described below.
*
* For non-resident attributes this involves allocating clusters in the data
* zone on the volume (except for regions that are being made sparse) and
* extending the run list to describe the allocated clusters as well as
* updating the mapping pairs array of the attribute. This in turn involves
* resizing the attribute record and if necessary moving it and/or other
* attributes into extent mft records and/or splitting the attribute record
* into multiple extent attribute records.
*
* Also, the attribute list attribute is updated if present and in some of the
* above cases (the ones where extent mft records/attributes come into play),
* an attribute list attribute is created if not already present.
*
* Return the new allocated size on success and -errno on error. In the case
* that an error is encountered but a partial extension at least up to
* @data_start (if present) is possible, the allocation is partially extended
* and this is returned. This means the caller must check the returned size to
* determine if the extension was partial. If @data_start is -1 then partial
* allocations are not performed.
*
* WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
*
* Locking: This function takes the runlist lock of @ni for writing as well as
* locking the mft record of the base ntfs inode. These locks are maintained
* throughout execution of the function. These locks are required so that the
* attribute can be resized safely and so that it can for example be converted
* from resident to non-resident safely.
*
* TODO: At present attribute list attribute handling is not implemented.
*
* TODO: At present it is not safe to call this function for anything other
* than the $DATA attribute(s) of an uncompressed and unencrypted file.
*/
s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
const s64 new_data_size, const s64 data_start)
{
VCN vcn;
s64 ll, allocated_size, start = data_start;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
runlist_element *rl, *rl2;
unsigned long flags;
int err, mp_size;
u32 attr_len = 0; /* Silence stupid gcc warning. */
bool mp_rebuilt;
#ifdef DEBUG
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
"old_allocated_size 0x%llx, "
"new_allocated_size 0x%llx, new_data_size 0x%llx, "
"data_start 0x%llx.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(unsigned long long)allocated_size,
(unsigned long long)new_alloc_size,
(unsigned long long)new_data_size,
(unsigned long long)start);
#endif
retry_extend:
/*
* For non-resident attributes, @start and @new_alloc_size need to be aligned
* to cluster boundaries for allocation purposes.
*/
if (NInoNonResident(ni)) {
if (start > 0)
start &= ~(s64)vol->cluster_size_mask;
new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
~(s64)vol->cluster_size_mask;
}
BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
/* Check if new size is allowed in $AttrDef. */
err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
if (unlikely(err)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ERANGE) {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the new "
"allocation would exceed the "
"maximum allowed size for "
"this attribute type.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
} else {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because this "
"attribute type is not "
"defined on the NTFS volume. "
"Possible corruption! You "
"should run chkdsk!",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
}
}
/* Translate error code to be POSIX conformant for write(2). */
if (err == -ERANGE)
err = -EFBIG;
else
err = -EIO;
return err;
}
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/*
* We will be modifying both the runlist (if non-resident) and the mft
* record so lock them both down.
*/
down_write(&ni->runlist.lock);
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/*
* If non-resident, seek to the last extent. If resident, there is
* only one extent, so seek to that.
*/
vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
0;
/*
* Abort if someone did the work whilst we waited for the locks. If we
* just converted the attribute from resident to non-resident it is
* likely that exactly this has happened already. We cannot quite
* abort if we need to update the data size.
*/
if (unlikely(new_alloc_size <= allocated_size)) {
ntfs_debug("Allocated size already exceeds requested size.");
new_alloc_size = allocated_size;
if (new_data_size < 0)
goto done;
/*
* We want the first attribute extent so that we can update the
* data size.
*/
vcn = 0;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
/* Use goto to reduce indentation. */
if (a->non_resident)
goto do_non_resident_extend;
BUG_ON(NInoNonResident(ni));
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
/*
* Extend the attribute record to be able to store the new attribute
* size. ntfs_attr_record_resize() will not do anything if the size is
* not changing.
*/
if (new_alloc_size < vol->mft_record_size &&
!ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) +
new_alloc_size)) {
/* The resize succeeded! */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset);
write_unlock_irqrestore(&ni->size_lock, flags);
if (new_data_size >= 0) {
BUG_ON(new_data_size < attr_len);
a->data.resident.value_length =
cpu_to_le32((u32)new_data_size);
}
goto flush_done;
}
/*
* We have to drop all the locks so we can call
* ntfs_attr_make_non_resident(). This could be optimised by try-
* locking the first page cache page and only if that fails dropping
* the locks, locking the page, and redoing all the locking and
* lookups. While this would be a huge optimisation, it is not worth
* it as this is definitely a slow code path.
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* Not enough space in the mft record, try to make the attribute
* non-resident and if successful restart the extension process.
*/
err = ntfs_attr_make_non_resident(ni, attr_len);
if (likely(!err))
goto retry_extend;
/*
* Could not make non-resident. If this is due to this not being
* permitted for this attribute type or there not being enough space,
* try to make other attributes non-resident. Otherwise fail.
*/
if (unlikely(err != -EPERM && err != -ENOSPC)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the conversion from resident "
"to non-resident attribute failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
goto conv_err_out;
}
/* TODO: Not implemented from here, abort. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ENOSPC)
ntfs_error(vol->sb, "Not enough space in the mft "
"record/on disk for the non-resident "
"attribute value. This case is not "
"implemented yet.");
else /* if (err == -EPERM) */
ntfs_error(vol->sb, "This attribute type may not be "
"non-resident. This case is not "
"implemented yet.");
}
err = -EOPNOTSUPP;
goto conv_err_out;
#if 0
// TODO: Attempt to make other attributes non-resident.
if (!err)
goto do_resident_extend;
/*
* Both the attribute list attribute and the standard information
* attribute must remain in the base inode. Thus, if this is one of
* these attributes, we have to try to move other attributes out into
* extent mft records instead.
*/
if (ni->type == AT_ATTRIBUTE_LIST ||
ni->type == AT_STANDARD_INFORMATION) {
// TODO: Attempt to move other attributes into extent mft
// records.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
goto err_out;
}
// TODO: Attempt to move this attribute to an extent mft record, but
// only if it is not already the only attribute in an mft record in
// which case there would be nothing to gain.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
/* There is nothing we can do to make enough space. )-: */
goto err_out;
#endif
do_non_resident_extend:
BUG_ON(!NInoNonResident(ni));
if (new_alloc_size == allocated_size) {
BUG_ON(vcn);
goto alloc_done;
}
/*
* If the data starts after the end of the old allocation, this is a
* $DATA attribute and sparse attributes are enabled on the volume and
* for this inode, then create a sparse region between the old
* allocated size and the start of the data. Otherwise simply proceed
* with filling the whole space between the old allocated size and the
* new allocated size with clusters.
*/
if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
goto skip_sparse;
// TODO: This is not implemented yet. We just fill in with real
// clusters for now...
ntfs_debug("Inserting holes is not-implemented yet. Falling back to "
"allocating real clusters instead.");
skip_sparse:
rl = ni->runlist.rl;
if (likely(rl)) {
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/* If this attribute extent is not mapped, map it now. */
if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
if (!rl && !allocated_size)
goto first_alloc;
rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the "
"mapping of a runlist "
"fragment failed with error "
"code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err);
if (err != -ENOMEM)
err = -EIO;
goto err_out;
}
ni->runlist.rl = rl;
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/*
* We now know the runlist of the last extent is mapped and @rl is at
* the end of the runlist. We want to begin allocating clusters
* starting at the last allocated cluster to reduce fragmentation. If
* there are no valid LCNs in the attribute we let the cluster
* allocator choose the starting cluster.
*/
/* If the last LCN is a hole or similar, seek back to the last real LCN. */
while (rl->lcn < 0 && rl > ni->runlist.rl)
rl--;
first_alloc:
// FIXME: Need to implement partial allocations so at least part of the
// write can be performed when start >= 0. (Needed for POSIX write(2)
// conformance.)
rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
(new_alloc_size - allocated_size) >>
vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
rl->lcn + rl->length : -1, DATA_ZONE, true);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the allocation of clusters "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM && err != -ENOSPC)
err = -EIO;
goto err_out;
}
rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the runlist merge failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl2);
goto err_out;
}
ni->runlist.rl = rl;
ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
allocated_size) >> vol->cluster_size_bits);
/* Find the runlist element with which the attribute extent starts. */
ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
BUG_ON(!rl2);
BUG_ON(!rl2->length);
BUG_ON(rl2->lcn < LCN_HOLE);
mp_rebuilt = false;
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
err = mp_size;
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because determining the size for the "
"mapping pairs failed with error code "
"%i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Extend the attribute record to fit the bigger mapping pairs array. */
attr_len = le32_to_cpu(a->length);
err = ntfs_attr_record_resize(m, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
if (unlikely(err)) {
BUG_ON(err != -ENOSPC);
// TODO: Deal with this by moving this extent to a new mft
// record or by starting a new extent in a new mft record,
// possibly by extending this extent partially and filling it
// and creating a new extent for the remainder, or by making
// other attributes non-resident and/or by moving other
// attributes out of this mft record.
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Not enough space in the mft "
"record for the extended attribute "
"record. This case is not "
"implemented yet.");
err = -EOPNOTSUPP;
goto undo_alloc;
}
mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, ll, -1, NULL);
if (unlikely(err)) {
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because building the mapping pairs "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Update the highest_vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
vol->cluster_size_bits) - 1);
/*
* We now have extended the allocated size of the attribute. Reflect
* this in the ntfs_inode structure and the attribute record.
*/
if (a->data.non_resident.lowest_vcn) {
/*
* We are not in the first attribute extent, switch to it, but
* first ensure the changes will make it to disk later.
*/
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto restore_undo_alloc;
/* @m is not used any more so no need to set it. */
a = ctx->attr;
}
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
/*
* FIXME: This would fail if @ni is a directory, $MFT, or an index,
* since those can have sparse/compressed set. For example, a directory can
* be set compressed even though it is not compressed itself, and in that
* case the bit means that files are to be created compressed in the
* directory... At present this is ok as this code is only called for
* regular files, and only for their $DATA attribute(s).
* FIXME: The calculation is wrong if we created a hole above. For now
* it does not matter as we never create holes.
*/
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size - allocated_size;
a->data.non_resident.compressed_size =
cpu_to_sle64(ni->itype.compressed.size);
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
alloc_done:
if (new_data_size >= 0) {
BUG_ON(new_data_size <
sle64_to_cpu(a->data.non_resident.data_size));
a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
}
flush_done:
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
done:
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
ntfs_debug("Done, new_allocated_size 0x%llx.",
(unsigned long long)new_alloc_size);
return new_alloc_size;
restore_undo_alloc:
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot complete extension of allocation "
"of inode 0x%lx, attribute type 0x%x, because "
"lookup of first attribute extent failed with "
"error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err == -ENOENT)
err = -EIO;
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
allocated_size >> vol->cluster_size_bits, NULL, 0,
ctx)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"attribute in error code path. Run chkdsk to "
"recover.");
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
/*
* FIXME: This would fail if @ni is a directory... See above.
* FIXME: The calculation is wrong if we created a hole above.
* For now it does not matter as we never create holes.
*/
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size -
allocated_size;
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* The only thing that is now wrong is the allocated size of the
* base attribute extent which chkdsk should be able to fix.
*/
NVolSetErrors(vol);
return err;
}
ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
(allocated_size >> vol->cluster_size_bits) - 1);
undo_alloc:
ll = allocated_size >> vol->cluster_size_bits;
if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
"in error code path. Run chkdsk to recover "
"the lost cluster(s).");
NVolSetErrors(vol);
}
m = ctx->mrec;
a = ctx->attr;
/*
* If the runlist truncation fails and/or the search context is no
* longer valid, we cannot resize the attribute record or build the
* mapping pairs array thus we mark the inode bad so that no access to
* the freed clusters can happen.
*/
if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
ntfs_error(vol->sb, "Failed to %s in error code path. Run "
"chkdsk to recover.", IS_ERR(m) ?
"restore attribute search context" :
"truncate attribute runlist");
NVolSetErrors(vol);
} else if (mp_rebuilt) {
if (ntfs_attr_record_resize(m, a, attr_len)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record in error code path. Run "
"chkdsk to recover.");
NVolSetErrors(vol);
} else /* if (success) */ {
if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.
mapping_pairs_offset), attr_len -
le16_to_cpu(a->data.non_resident.
mapping_pairs_offset), rl2, ll, -1,
NULL)) {
ntfs_error(vol->sb, "Failed to restore "
"mapping pairs array in error "
"code path. Run chkdsk to "
"recover.");
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
}
}
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
conv_err_out:
ntfs_debug("Failed. Returning error code %i.", err);
return err;
}
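#if 0 /* Illustrative sketch only -- not part of the driver. */
/*
 * A minimal, hypothetical caller showing the return value handling described
 * in the header comment of ntfs_attr_extend_allocation() above: a negative
 * return value is -errno, a non-negative one is the new allocated size,
 * which may be smaller than the requested size when only a partial
 * extension (at least up to @data_start) was possible.
 */
static int example_extend(ntfs_inode *ni, s64 want_alloc, s64 want_data,
s64 pos)
{
s64 new_alloc;

new_alloc = ntfs_attr_extend_allocation(ni, want_alloc, want_data, pos);
if (new_alloc < 0)
return (int)new_alloc;
if (new_alloc < want_alloc)
ntfs_debug("Allocation only extended to 0x%llx bytes.",
(unsigned long long)new_alloc);
return 0;
}
#endif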
/**
* ntfs_attr_set - fill (a part of) an attribute with a byte
* @ni: ntfs inode describing the attribute to fill
* @ofs: offset inside the attribute at which to start to fill
* @cnt: number of bytes to fill
* @val: the unsigned 8-bit value with which to fill the attribute
*
* Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
* byte offset @ofs inside the attribute with the constant byte @val.
*
* This function is effectively like memset() applied to an ntfs attribute.
* Note this function actually only operates on the page cache pages belonging
* to the ntfs attribute and it marks them dirty after doing the memset().
* Thus it relies on the vm dirty page write code paths to cause the modified
* pages to be written to the mft record/disk.
*
* Return 0 on success and -errno on error. An error code of -ESPIPE means
* that @ofs + @cnt were outside the end of the attribute and no write was
* performed.
*/
int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
{
ntfs_volume *vol = ni->vol;
struct address_space *mapping;
struct page *page;
u8 *kaddr;
pgoff_t idx, end;
unsigned start_ofs, end_ofs, size;
ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
(long long)ofs, (long long)cnt, val);
BUG_ON(ofs < 0);
BUG_ON(cnt < 0);
if (!cnt)
goto done;
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
mapping = VFS_I(ni)->i_mapping;
/* Work out the starting index and page offset. */
idx = ofs >> PAGE_CACHE_SHIFT;
start_ofs = ofs & ~PAGE_CACHE_MASK;
/* Work out the ending index and page offset. */
end = ofs + cnt;
end_ofs = end & ~PAGE_CACHE_MASK;
/* If the end is outside the inode size return -ESPIPE. */
if (unlikely(end > i_size_read(VFS_I(ni)))) {
ntfs_error(vol->sb, "Request exceeds end of attribute.");
return -ESPIPE;
}
end >>= PAGE_CACHE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (error, index 0x%lx).", idx);
return PTR_ERR(page);
}
/*
* If the last page is the same as the first page, need to
* limit the write to the end offset.
*/
size = PAGE_CACHE_SIZE;
if (idx == end)
size = end_ofs;
kaddr = kmap_atomic(page);
memset(kaddr + start_ofs, val, size - start_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
if (idx == end)
goto done;
idx++;
}
/* Do the whole pages the fast way. */
for (; idx < end; idx++) {
/* Find or create the current page. (The page is locked.) */
page = grab_cache_page(mapping, idx);
if (unlikely(!page)) {
ntfs_error(vol->sb, "Insufficient memory to grab "
"page (index 0x%lx).", idx);
return -ENOMEM;
}
kaddr = kmap_atomic(page);
memset(kaddr, val, PAGE_CACHE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr);
/*
* If the page has buffers, mark them uptodate since buffer
* state and not page state is definitive in 2.6 kernels.
*/
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
bh = head = page_buffers(page);
do {
set_buffer_uptodate(bh);
} while ((bh = bh->b_this_page) != head);
}
/* Now that buffers are uptodate, set the page uptodate, too. */
SetPageUptodate(page);
/*
* Set the page and all its buffers dirty and mark the inode
* dirty, too. The VM will write the page later on.
*/
set_page_dirty(page);
/* Finally unlock and release the page. */
unlock_page(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
kaddr = kmap_atomic(page);
memset(kaddr, val, end_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
done:
ntfs_debug("Done.");
return 0;
}
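#if 0 /* Illustrative sketch only -- not part of the driver. */
/*
 * A minimal, hypothetical use of ntfs_attr_set() as described in its header
 * comment above, here zero-filling a byte range of an attribute. -ESPIPE
 * means the range ends beyond the end of the attribute and nothing was
 * written; any other non-zero return value is a normal -errno.
 */
static int example_zero_range(ntfs_inode *ni, const s64 ofs, const s64 cnt)
{
int err;

err = ntfs_attr_set(ni, ofs, cnt, 0);
if (err == -ESPIPE)
ntfs_debug("Range is outside the attribute, nothing was written.");
return err;
}
#endif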
#endif /* NTFS_RW */
| gpl-2.0 |
jomeister15/ICS-SGH-I727-kernel | drivers/mtd/maps/dilnetpc.c | 8188 | 13605 | /* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
* featuring the AMD Elan SC410 processor. There are two variants of this
* board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
* ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
* flash and 16 megs of RAM.
* For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
* and http://www.ssv-embedded.de/ssv/pc104/p170.htm
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
#include <asm/io.h>
/*
** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
** Destroying any of these blocks transforms the DNPC into
** a paperweight (albeit not a very useful one, considering
** it only weighs a few grams).
**
** Therefore, the BIOS blocks must never be erased or written to
** except by people who know exactly what they are doing (e.g.
** to install a BIOS update). These partitions are marked read-only
** by default, but can be made read/write by undefining
** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
*/
#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
/*
** The ID string (in ROM) is checked to determine whether we
** are running on a DNP/1486 or ADNP/1486
*/
#define BIOSID_BASE 0x000fe100
#define ID_DNPC "DNP1486"
#define ID_ADNP "ADNP1486"
/*
** Address where the flash should appear in CPU space
*/
#define FLASH_BASE 0x2000000
/*
** Chip Setup and Control (CSC) indexed register space
*/
#define CSC_INDEX 0x22
#define CSC_DATA 0x23
#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */
#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */
#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */
#define CSC_CR 0xd0 /* internal I/O device disable/Echo */
/* Z-bus/configuration register */
#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */
/*
** PC Card indexed register space:
*/
#define PCC_INDEX 0x3e0
#define PCC_DATA 0x3e1
#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */
#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */
#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */
#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */
#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */
#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */
#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */
/*
** Access to SC4x0's Chip Setup and Control (CSC)
** and PC Card (PCC) indexed registers:
*/
static inline void setcsc(int reg, unsigned char data)
{
outb(reg, CSC_INDEX);
outb(data, CSC_DATA);
}
static inline unsigned char getcsc(int reg)
{
outb(reg, CSC_INDEX);
return(inb(CSC_DATA));
}
static inline void setpcc(int reg, unsigned char data)
{
outb(reg, PCC_INDEX);
outb(data, PCC_DATA);
}
static inline unsigned char getpcc(int reg)
{
outb(reg, PCC_INDEX);
return(inb(PCC_DATA));
}
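#if 0 /* Illustrative sketch only -- not part of the driver. */
/*
** The helpers above are used throughout this driver in read-modify-write
** sequences on an indexed register. Hypothetical wrappers, shown only to
** make the idiom explicit:
*/
static inline void example_csc_set_bits(int reg, unsigned char mask)
{
setcsc(reg, getcsc(reg) | mask);
}
static inline void example_csc_clear_bits(int reg, unsigned char mask)
{
setcsc(reg, getcsc(reg) & ~mask);
}
#endif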
/*
************************************************************
** Enable access to DIL/NetPC's flash by mapping it into
** the SC4x0's MMS Window C.
************************************************************
*/
static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
{
unsigned long flash_end = flash_base + flash_size - 1;
/*
** enable setup of MMS windows C-F:
*/
/* - enable PC Card indexed register space */
setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
/* - set PC Card controller to operate in standard mode */
setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
/*
** Program base address and end address of window
** where the flash ROM should appear in CPU address space
*/
setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
/* program offset of first flash location to appear in this window (0) */
setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
/* set attributes for MMS window C: non-cacheable, write-enabled */
setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
/* select physical device ROMCS0 (i.e. flash) for MMS Window C */
setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
/* enable memory window 1 */
setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
/* now disable PC Card indexed register space again */
setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
}
/*
************************************************************
** Disable access to DIL/NetPC's flash by mapping it into
** the SC4x0's MMS Window C.
************************************************************
*/
static void dnpc_unmap_flash(void)
{
/* - enable PC Card indexed register space */
setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
/* disable memory window 1 */
setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
/* now disable PC Card indexed register space again */
setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
}
/*
************************************************************
** Enable/Disable VPP to write to flash
************************************************************
*/
static DEFINE_SPINLOCK(dnpc_spin);
static int vpp_counter = 0;
/*
** This is what has to be done for the DNP board ..
*/
static void dnp_set_vpp(struct map_info *not_used, int on)
{
spin_lock_irq(&dnpc_spin);
if (on)
{
if(++vpp_counter == 1)
setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
}
else
{
if(--vpp_counter == 0)
setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
else
BUG_ON(vpp_counter < 0);
}
spin_unlock_irq(&dnpc_spin);
}
/*
** .. and this the ADNP version:
*/
static void adnp_set_vpp(struct map_info *not_used, int on)
{
spin_lock_irq(&dnpc_spin);
if (on)
{
if(++vpp_counter == 1)
setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
}
else
{
if(--vpp_counter == 0)
setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
else
BUG_ON(vpp_counter < 0);
}
spin_unlock_irq(&dnpc_spin);
}
#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
#define WINDOW_ADDR FLASH_BASE
static struct map_info dnpc_map = {
.name = "ADNP Flash Bank",
.size = ADNP_WINDOW_SIZE,
.bankwidth = 1,
.set_vpp = adnp_set_vpp,
.phys = WINDOW_ADDR
};
/*
** The layout of the flash is somewhat "strange":
**
** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data
** 2. 64 KiB (1 block) : System BIOS
** 3. 960 KiB (15 blocks) : User Data (DNP model) or
** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
** 4. 64 KiB (1 block) : System BIOS Entry
*/
static struct mtd_partition partition_info[]=
{
{
.name = "ADNP boot",
.offset = 0,
.size = 0xf0000,
},
{
.name = "ADNP system BIOS",
.offset = MTDPART_OFS_NXTBLK,
.size = 0x10000,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
.mask_flags = MTD_WRITEABLE,
#endif
},
{
.name = "ADNP file system",
.offset = MTDPART_OFS_NXTBLK,
.size = 0x2f0000,
},
{
.name = "ADNP system BIOS entry",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
.mask_flags = MTD_WRITEABLE,
#endif
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
static struct mtd_info *mymtd;
static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
static struct mtd_info *merged_mtd;
/*
** "Highlevel" partition info:
**
** Using the MTD concat layer, we can re-arrange partitions to our
** liking: we construct a virtual MTD device by concatenating the
** partitions, specifying the sequence such that the boot block
** is immediately followed by the filesystem block (i.e. the stupid
** system BIOS block is mapped to a different place). When re-partitioning
** this concatenated MTD device, we can set the boot block size to
** an arbitrary (though erase block aligned) value i.e. not one that
** is dictated by the flash's physical layout. We can thus set the
** boot block to be e.g. 64 KB (which is fully sufficient if we want
** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
** a large kernel image. In all cases, the remainder of the flash
** is available as file system space.
*/
static struct mtd_partition higlvl_partition_info[]=
{
{
.name = "ADNP boot block",
.offset = 0,
.size = CONFIG_MTD_DILNETPC_BOOTSIZE,
},
{
.name = "ADNP file system space",
.offset = MTDPART_OFS_NXTBLK,
.size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
},
{
.name = "ADNP system BIOS + BIOS Entry",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
.mask_flags = MTD_WRITEABLE,
#endif
},
};
#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
static int dnp_adnp_probe(void)
{
char *biosid, rc = -1;
biosid = (char*)ioremap(BIOSID_BASE, 16);
if(biosid)
{
if(!strcmp(biosid, ID_DNPC))
rc = 1; /* this is a DNPC */
else if(!strcmp(biosid, ID_ADNP))
rc = 0; /* this is an ADNPC */
}
iounmap((void *)biosid);
return(rc);
}
static int __init init_dnpc(void)
{
int is_dnp;
/*
** determine hardware (DNP/ADNP/invalid)
*/
if((is_dnp = dnp_adnp_probe()) < 0)
return -ENXIO;
/*
** Things are set up for ADNP by default
** -> modify all that needs to be different for DNP
*/
if(is_dnp)
{ /*
** Adjust window size, select correct set_vpp function.
** The partitioning scheme is identical on both DNP
** and ADNP except for the size of the third partition.
*/
int i;
dnpc_map.size = DNP_WINDOW_SIZE;
dnpc_map.set_vpp = dnp_set_vpp;
partition_info[2].size = 0xf0000;
/*
** increment all string pointers so the leading 'A' gets skipped,
** thus turning all occurrences of "ADNP ..." into "DNP ..."
*/
++dnpc_map.name;
for(i = 0; i < NUM_PARTITIONS; i++)
++partition_info[i].name;
higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
++higlvl_partition_info[i].name;
}
printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);
dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
if (!dnpc_map.virt) {
printk("Failed to ioremap_nocache\n");
return -EIO;
}
simple_map_init(&dnpc_map);
printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
mymtd = do_map_probe("jedec_probe", &dnpc_map);
if (!mymtd)
mymtd = do_map_probe("cfi_probe", &dnpc_map);
/*
** If flash probes fail, try to make flashes accessible
** at least as ROM. Adjust erasesize in this case since
** the default one (128M) will break our partitioning
*/
if (!mymtd)
if((mymtd = do_map_probe("map_rom", &dnpc_map)))
mymtd->erasesize = 0x10000;
if (!mymtd) {
iounmap(dnpc_map.virt);
return -ENXIO;
}
mymtd->owner = THIS_MODULE;
/*
** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
** -> add_mtd_partitions() will _not_ register MTD devices for
** the partitions, but will instead store pointers to the MTD
** objects it creates into our lowlvl_parts[] array.
** NOTE: we arrange the pointers such that the sequence of the
** partitions gets re-arranged: partition #2 follows
** partition #0.
*/
partition_info[0].mtdp = &lowlvl_parts[0];
partition_info[1].mtdp = &lowlvl_parts[2];
partition_info[2].mtdp = &lowlvl_parts[1];
partition_info[3].mtdp = &lowlvl_parts[3];
mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
/*
** now create a virtual MTD device by concatenating the four partitions
** (in the sequence given by the lowlvl_parts[] array).
*/
merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
if(merged_mtd)
{ /*
** now partition the new device the way we want it. This time,
** we do not supply mtd pointers in higlvl_partition_info, so
** add_mtd_partitions() will register the devices.
*/
mtd_device_register(merged_mtd, higlvl_partition_info,
NUM_HIGHLVL_PARTITIONS);
}
return 0;
}
static void __exit cleanup_dnpc(void)
{
if(merged_mtd) {
mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dnpc_map.virt) {
iounmap(dnpc_map.virt);
dnpc_unmap_flash();
dnpc_map.virt = NULL;
}
}
module_init(init_dnpc);
module_exit(cleanup_dnpc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
| gpl-2.0 |
diegoheusser/linux | arch/avr32/mach-at32ap/intc.c | 9212 | 4204 | /*
* Copyright (C) 2006, 2008 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/export.h>
#include <asm/io.h>
#include "intc.h"
struct intc {
void __iomem *regs;
struct irq_chip chip;
#ifdef CONFIG_PM
unsigned long suspend_ipr;
unsigned long saved_ipr[64];
#endif
};
extern struct platform_device at32_intc0_device;
/*
* TODO: We may be able to implement mask/unmask by setting IxM flags
* in the status register.
*/
static void intc_mask_irq(struct irq_data *d)
{
}
static void intc_unmask_irq(struct irq_data *d)
{
}
static struct intc intc0 = {
.chip = {
.name = "intc",
.irq_mask = intc_mask_irq,
.irq_unmask = intc_unmask_irq,
},
};
/*
* All interrupts go via intc at some point.
*/
asmlinkage void do_IRQ(int level, struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned int irq;
unsigned long status_reg;
local_irq_disable();
old_regs = set_irq_regs(regs);
irq_enter();
irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
generic_handle_irq(irq);
/*
* Clear all interrupt level masks so that we may handle
* interrupts during softirq processing. If this is a nested
* interrupt, interrupts must stay globally disabled until we
* return.
*/
status_reg = sysreg_read(SR);
status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M)
| SYSREG_BIT(I2M) | SYSREG_BIT(I3M));
sysreg_write(SR, status_reg);
irq_exit();
set_irq_regs(old_regs);
}
void __init init_IRQ(void)
{
extern void _evba(void);
extern void irq_level0(void);
struct resource *regs;
struct clk *pclk;
unsigned int i;
u32 offset, readback;
regs = platform_get_resource(&at32_intc0_device, IORESOURCE_MEM, 0);
if (!regs) {
printk(KERN_EMERG "intc: no mmio resource defined\n");
goto fail;
}
pclk = clk_get(&at32_intc0_device.dev, "pclk");
if (IS_ERR(pclk)) {
printk(KERN_EMERG "intc: no clock defined\n");
goto fail;
}
clk_enable(pclk);
intc0.regs = ioremap(regs->start, resource_size(regs));
if (!intc0.regs) {
printk(KERN_EMERG "intc: failed to map registers (0x%08lx)\n",
(unsigned long)regs->start);
goto fail;
}
/*
* Initialize all interrupts to level 0 (lowest priority). The
* priority level may be changed by calling
* irq_set_priority().
*
*/
offset = (unsigned long)&irq_level0 - (unsigned long)&_evba;
for (i = 0; i < NR_INTERNAL_IRQS; i++) {
intc_writel(&intc0, INTPR0 + 4 * i, offset);
readback = intc_readl(&intc0, INTPR0 + 4 * i);
if (readback == offset)
irq_set_chip_and_handler(i, &intc0.chip,
handle_simple_irq);
}
/* Unmask all interrupt levels */
sysreg_write(SR, (sysreg_read(SR)
& ~(SR_I3M | SR_I2M | SR_I1M | SR_I0M)));
return;
fail:
panic("Interrupt controller initialization failed!\n");
}
#ifdef CONFIG_PM
void intc_set_suspend_handler(unsigned long offset)
{
intc0.suspend_ipr = offset;
}
static int intc_suspend(void)
{
int i;
if (unlikely(!irqs_disabled())) {
pr_err("intc_suspend: called with interrupts enabled\n");
return -EINVAL;
}
if (unlikely(!intc0.suspend_ipr)) {
pr_err("intc_suspend: suspend_ipr not initialized\n");
return -EINVAL;
}
for (i = 0; i < 64; i++) {
intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i);
intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr);
}
return 0;
}
static void intc_resume(void)
{
int i;
for (i = 0; i < 64; i++)
intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
}
#else
#define intc_suspend NULL
#define intc_resume NULL
#endif
static struct syscore_ops intc_syscore_ops = {
.suspend = intc_suspend,
.resume = intc_resume,
};
static int __init intc_init_syscore(void)
{
register_syscore_ops(&intc_syscore_ops);
return 0;
}
device_initcall(intc_init_syscore);
unsigned long intc_get_pending(unsigned int group)
{
return intc_readl(&intc0, INTREQ0 + 4 * group);
}
EXPORT_SYMBOL_GPL(intc_get_pending);
| gpl-2.0 |
harunjo/galaxsih-kernel-JB-S3 | arch/mn10300/lib/checksum.c | 13052 | 2346 | /* MN10300 Optimised checksumming wrappers
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include "internal.h"
static inline unsigned short from32to16(__wsum sum)
{
asm(" add %1,%0 \n"
" addc 0xffff,%0 \n"
: "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000)
: "cc"
);
return sum >> 16;
}
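#if 0 /* Illustrative sketch only -- a plain C rendering of the asm above. */
/*
 * The inline asm in from32to16() folds the 32-bit partial checksum into 16
 * bits with an end-around carry. A hypothetical C version of the same
 * arithmetic (shown for clarity only) would be:
 */
static inline unsigned short from32to16_c(u32 sum)
{
sum = (sum & 0xffff) + (sum >> 16); /* add the two 16-bit halves */
sum = (sum & 0xffff) + (sum >> 16); /* fold any carry back in */
return sum;
}
#endif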
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return ~do_csum(iph, ihl * 4);
}
EXPORT_SYMBOL(ip_fast_csum);
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
__wsum result;
result = do_csum(buff, len);
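/*
 * Fold @sum in using end-around carry (ones' complement addition): if the
 * 32-bit addition below wraps, the carry is added back into the result.
 */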
result += sum;
if (sum > result)
result++;
return result;
}
EXPORT_SYMBOL(csum_partial);
__sum16 ip_compute_csum(const void *buff, int len)
{
return ~from32to16(do_csum(buff, len));
}
EXPORT_SYMBOL(ip_compute_csum);
__wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
copy_from_user(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
sum = csum_partial(src, len, sum);
memcpy(dst, src, len);
return sum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
__wsum csum_partial_copy_from_user(const void *src, void *dst,
int len, __wsum sum,
int *err_ptr)
{
int missing;
missing = copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*err_ptr = -EFAULT;
}
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void *dst,
int len, __wsum sum,
int *err_ptr)
{
int missing;
missing = copy_to_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*err_ptr = -EFAULT;
}
return csum_partial(src, len, sum);
}
EXPORT_SYMBOL(csum_and_copy_to_user);
| gpl-2.0 |
Jeongseob/linux-page-coloring | drivers/crypto/ux500/hash/hash_core.c | 253 | 52384 | /*
* Cryptographic API.
* Support for Nomadik hardware crypto engine.
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
* Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
* Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
* Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) "hashX hashX: " fmt
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <linux/platform_data/crypto-ux500.h>
#include "hash_alg.h"
static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
/**
* Pre-calculated empty message digests.
*/
static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09
};
static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};
/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
0x70, 0x69, 0x0e, 0x1d
};
/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
/**
* struct hash_driver_data - data specific to the driver.
*
* @device_list: A list of registered devices to choose from.
* @device_allocation: A semaphore initialized with number of devices.
*/
struct hash_driver_data {
struct klist device_list;
struct semaphore device_allocation;
};
static struct hash_driver_data driver_data;
/* Declaration of functions */
/**
* hash_messagepad - Pads a message and write the nblw bits.
* @device_data: Structure for the hash device.
* @message: Last word of a message
* @index_bytes: The number of bytes in the last message
*
* This function manages the final part of the digest calculation, when less
* than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
*
*/
static void hash_messagepad(struct hash_device_data *device_data,
const u32 *message, u8 index_bytes);
/**
* release_hash_device - Releases a previously allocated hash device.
* @device_data: Structure for the hash device.
*
*/
static void release_hash_device(struct hash_device_data *device_data)
{
spin_lock(&device_data->ctx_lock);
device_data->current_ctx->device = NULL;
device_data->current_ctx = NULL;
spin_unlock(&device_data->ctx_lock);
/*
* The down_interruptible part for this semaphore is called in
* hash_get_device_data().
*/
up(&driver_data.device_allocation);
}
static void hash_dma_setup_channel(struct hash_device_data *device_data,
struct device *dev)
{
struct hash_platform_data *platform_data = dev->platform_data;
struct dma_slave_config conf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = device_data->phybase + HASH_DMA_FIFO,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.dst_maxburst = 16,
};
dma_cap_zero(device_data->dma.mask);
dma_cap_set(DMA_SLAVE, device_data->dma.mask);
device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
device_data->dma.chan_mem2hash =
dma_request_channel(device_data->dma.mask,
platform_data->dma_filter,
device_data->dma.cfg_mem2hash);
dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
init_completion(&device_data->dma.complete);
}
static void hash_dma_callback(void *data)
{
struct hash_ctx *ctx = data;
complete(&ctx->device->dma.complete);
}
static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
int len, enum dma_data_direction direction)
{
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *channel = NULL;
dma_cookie_t cookie;
if (direction != DMA_TO_DEVICE) {
dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
__func__);
return -EFAULT;
}
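/*
 * hash_dma_valid_data() has already verified that at most the last
 * scatterlist entry is unaligned, so pad it up to the DMA alignment
 * and let the engine transfer whole words.
 */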
sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
channel = ctx->device->dma.chan_mem2hash;
ctx->device->dma.sg = sg;
ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
ctx->device->dma.sg, ctx->device->dma.nents,
direction);
if (!ctx->device->dma.sg_len) {
dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
__func__);
return -EFAULT;
}
dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
__func__);
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"%s: device_prep_slave_sg() failed!\n", __func__);
return -EFAULT;
}
desc->callback = hash_dma_callback;
desc->callback_param = ctx;
cookie = dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
}
static void hash_dma_done(struct hash_ctx *ctx)
{
struct dma_chan *chan;
chan = ctx->device->dma.chan_mem2hash;
dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
ctx->device->dma.sg_len, DMA_TO_DEVICE);
}
static int hash_dma_write(struct hash_ctx *ctx,
struct scatterlist *sg, int len)
{
int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
if (error) {
dev_dbg(ctx->device->dev,
"%s: hash_set_dma_transfer() failed\n", __func__);
return error;
}
return len;
}
/**
* get_empty_message_digest - Returns a pre-calculated digest for
* the empty message.
* @device_data: Structure for the hash device.
* @zero_hash: Buffer to return the empty message digest.
* @zero_hash_size: Hash size of the empty message digest.
* @zero_digest: Set to true if a pre-calculated digest was returned.
*/
static int get_empty_message_digest(
struct hash_device_data *device_data,
u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
int ret = 0;
struct hash_ctx *ctx = device_data->current_ctx;
*zero_digest = false;
/**
* Caller responsible for ctx != NULL.
*/
if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
memcpy(zero_hash, &zero_message_hash_sha1[0],
SHA1_DIGEST_SIZE);
*zero_hash_size = SHA1_DIGEST_SIZE;
*zero_digest = true;
} else if (HASH_ALGO_SHA256 ==
ctx->config.algorithm) {
memcpy(zero_hash, &zero_message_hash_sha256[0],
SHA256_DIGEST_SIZE);
*zero_hash_size = SHA256_DIGEST_SIZE;
*zero_digest = true;
} else {
dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
__func__);
ret = -EINVAL;
goto out;
}
} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
if (!ctx->keylen) {
if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
memcpy(zero_hash, &zero_message_hmac_sha1[0],
SHA1_DIGEST_SIZE);
*zero_hash_size = SHA1_DIGEST_SIZE;
*zero_digest = true;
} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
memcpy(zero_hash, &zero_message_hmac_sha256[0],
SHA256_DIGEST_SIZE);
*zero_hash_size = SHA256_DIGEST_SIZE;
*zero_digest = true;
} else {
dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
__func__);
ret = -EINVAL;
goto out;
}
} else {
dev_dbg(device_data->dev,
"%s: Continue hash calculation, since hmac key available\n",
__func__);
}
}
out:
return ret;
}
/**
* hash_disable_power - Request to disable power and clock.
* @device_data: Structure for the hash device.
* @save_device_state: If true, saves the current hw state.
*
* This function requests that power (regulator) and clock be disabled,
* and can also save the current hw state.
*/
static int hash_disable_power(struct hash_device_data *device_data,
bool save_device_state)
{
int ret = 0;
struct device *dev = device_data->dev;
spin_lock(&device_data->power_state_lock);
if (!device_data->power_state)
goto out;
if (save_device_state) {
hash_save_state(device_data,
&device_data->state);
device_data->restore_dev_state = true;
}
clk_disable(device_data->clk);
ret = regulator_disable(device_data->regulator);
if (ret)
dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
device_data->power_state = false;
out:
spin_unlock(&device_data->power_state_lock);
return ret;
}
/**
* hash_enable_power - Request to enable power and clock.
* @device_data: Structure for the hash device.
* @restore_device_state: If true, restores a previous saved hw state.
*
* This function requests that power (regulator) and clock be enabled,
* and can also restore a previously saved hw state.
*/
static int hash_enable_power(struct hash_device_data *device_data,
bool restore_device_state)
{
int ret = 0;
struct device *dev = device_data->dev;
spin_lock(&device_data->power_state_lock);
if (!device_data->power_state) {
ret = regulator_enable(device_data->regulator);
if (ret) {
dev_err(dev, "%s: regulator_enable() failed!\n",
__func__);
goto out;
}
ret = clk_enable(device_data->clk);
if (ret) {
dev_err(dev, "%s: clk_enable() failed!\n", __func__);
/* Keep the clk_enable() error code; disable the regulator best-effort. */
regulator_disable(device_data->regulator);
goto out;
}
device_data->power_state = true;
}
if (device_data->restore_dev_state) {
if (restore_device_state) {
device_data->restore_dev_state = false;
hash_resume_state(device_data, &device_data->state);
}
}
out:
spin_unlock(&device_data->power_state_lock);
return ret;
}
/**
* hash_get_device_data - Checks for an available hash device and return it.
* @ctx: Structure for the hash context.
* @device_data: Structure for the hash device.
*
* This function checks for an available hash device and returns it to
* the caller.
* Note! The caller must release the device by calling up().
*/
static int hash_get_device_data(struct hash_ctx *ctx,
struct hash_device_data **device_data)
{
int ret;
struct klist_iter device_iterator;
struct klist_node *device_node;
struct hash_device_data *local_device_data = NULL;
/* Wait until a device is available */
ret = down_interruptible(&driver_data.device_allocation);
if (ret)
return ret; /* Interrupted */
/* Select a device */
klist_iter_init(&driver_data.device_list, &device_iterator);
device_node = klist_next(&device_iterator);
while (device_node) {
local_device_data = container_of(device_node,
struct hash_device_data, list_node);
spin_lock(&local_device_data->ctx_lock);
/* current_ctx allocates a device, NULL = unallocated */
if (local_device_data->current_ctx) {
device_node = klist_next(&device_iterator);
} else {
local_device_data->current_ctx = ctx;
ctx->device = local_device_data;
spin_unlock(&local_device_data->ctx_lock);
break;
}
spin_unlock(&local_device_data->ctx_lock);
}
klist_iter_exit(&device_iterator);
if (!device_node) {
/**
* No free device found.
* Since we allocated a device with down_interruptible, this
* should not be able to happen.
* Number of available devices, which are contained in
* device_allocation, is therefore decremented by not doing
* an up(device_allocation).
*/
return -EBUSY;
}
*device_data = local_device_data;
return 0;
}
/**
* hash_hw_write_key - Writes the key to the hardware registers.
*
* @device_data: Structure for the hash device.
* @key: Key to be written.
* @keylen: The length of the key.
*
* Note! This function DOES NOT write to the NBLW register, even though
* specified in the hw design spec. This is either due to incorrect
* information in the spec or to a bug in the hw.
*/
static void hash_hw_write_key(struct hash_device_data *device_data,
const u8 *key, unsigned int keylen)
{
u32 word = 0;
int nwords = 1;
HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
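/*
 * Push the key through the DIN register one 32-bit word at a time;
 * any trailing bytes are packed into a final partial word below.
 */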
while (keylen >= 4) {
u32 *key_word = (u32 *)key;
HASH_SET_DIN(key_word, nwords);
keylen -= 4;
key += 4;
}
/* Take care of the remaining bytes in the last word */
if (keylen) {
word = 0;
while (keylen) {
word |= (key[keylen - 1] << (8 * (keylen - 1)));
keylen--;
}
HASH_SET_DIN(&word, nwords);
}
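/*
 * Wait for any ongoing calculation to finish, kick off processing of
 * the key just written, and wait for the hardware to complete it.
 */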
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
HASH_SET_DCAL;
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
/**
* init_hash_hw - Initialise the hash hardware for a new calculation.
* @device_data: Structure for the hash device.
* @ctx: The hash context.
*
* This function will enable the bits needed to clear and start a new
* calculation.
*/
static int init_hash_hw(struct hash_device_data *device_data,
struct hash_ctx *ctx)
{
int ret = 0;
ret = hash_setconfiguration(device_data, &ctx->config);
if (ret) {
dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
__func__);
return ret;
}
hash_begin(device_data, ctx);
if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
hash_hw_write_key(device_data, ctx->key, ctx->keylen);
return ret;
}
/**
* hash_get_nents - Return number of entries (nents) in scatterlist (sg).
*
* @sg: Scatterlist.
* @size: Size in bytes.
* @aligned: True if sg data aligned to work in DMA mode.
*
*/
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
int nents = 0;
bool aligned_data = true;
while (size > 0 && sg) {
nents++;
size -= sg->length;
/* hash_set_dma_transfer will align last nent */
if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
(!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
aligned_data = false;
sg = sg_next(sg);
}
if (aligned)
*aligned = aligned_data;
if (size != 0)
return -EFAULT;
return nents;
}
/**
* hash_dma_valid_data - checks for dma valid sg data.
* @sg: Scatterlist.
* @datasize: Datasize in bytes.
*
* NOTE! This function checks whether the sg data is valid for DMA, since
* DMA only accepts data sizes that are a multiple of the word size.
*/
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
bool aligned;
/* Need to include at least one nent, else error */
if (hash_get_nents(sg, datasize, &aligned) < 1)
return false;
return aligned;
}
/**
* hash_init - Common hash init function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*
* Initialize structures.
*/
static int hash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
if (!ctx->key)
ctx->keylen = 0;
memset(&req_ctx->state, 0, sizeof(struct hash_state));
req_ctx->updated = 0;
if (hash_mode == HASH_MODE_DMA) {
if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
req_ctx->dma_mode = false; /* Don't use DMA */
pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
__func__, HASH_DMA_ALIGN_SIZE);
} else {
if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
hash_dma_valid_data(req->src, req->nbytes)) {
req_ctx->dma_mode = true;
} else {
req_ctx->dma_mode = false;
pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
__func__,
HASH_DMA_PERFORMANCE_MIN_SIZE);
}
}
}
return 0;
}
/**
* hash_processblock - This function processes a single block of 512 bits (64
* bytes), word aligned, starting at message.
* @device_data: Structure for the hash device.
* @message: Block (512 bits) of message to be written to
* the HASH hardware.
* @length: Length of the block, in bytes.
*
*/
static void hash_processblock(struct hash_device_data *device_data,
const u32 *message, int length)
{
int len = length / HASH_BYTES_PER_WORD;
/*
* NBLW bits. Reset the number of bits in last word (NBLW).
*/
HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
/*
* Write message data to the HASH_DIN register.
*/
HASH_SET_DIN(message, len);
}
/**
* hash_messagepad - Pads a message and writes the nblw bits.
* @device_data: Structure for the hash device.
* @message: Last word of a message.
* @index_bytes: The number of bytes in the last message.
*
* This function manages the final part of the digest calculation, when less
* than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
*
*/
static void hash_messagepad(struct hash_device_data *device_data,
const u32 *message, u8 index_bytes)
{
int nwords = 1;
/*
* Clear hash str register, only clear NBLW
* since DCAL will be reset by hardware.
*/
HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
/* Main loop */
while (index_bytes >= 4) {
HASH_SET_DIN(message, nwords);
index_bytes -= 4;
message++;
}
if (index_bytes)
HASH_SET_DIN(message, nwords);
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
HASH_SET_NBLW(index_bytes * 8);
dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
__func__, readl_relaxed(&device_data->base->din),
readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
HASH_SET_DCAL;
dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
__func__, readl_relaxed(&device_data->base->din),
readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
/**
* hash_incrementlength - Increments the length of the current message.
* @ctx: Hash request context.
* @incr: Number of bytes processed in this step.
*
* Overflow cannot occur, because conditions for overflow are checked in
* hash_hw_update.
*/
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
ctx->state.length.low_word += incr;
/* Check for wrap-around */
if (ctx->state.length.low_word < incr)
ctx->state.length.high_word++;
}
/**
* hash_setconfiguration - Sets the required configuration for the hash
* hardware.
* @device_data: Structure for the hash device.
* @config: Pointer to a configuration structure.
*/
int hash_setconfiguration(struct hash_device_data *device_data,
struct hash_config *config)
{
int ret = 0;
if (config->algorithm != HASH_ALGO_SHA1 &&
config->algorithm != HASH_ALGO_SHA256)
return -EPERM;
/*
* DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
* to be written to HASH_DIN is considered as 32 bits.
*/
HASH_SET_DATA_FORMAT(config->data_format);
/*
* ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
*/
switch (config->algorithm) {
case HASH_ALGO_SHA1:
HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
break;
case HASH_ALGO_SHA256:
HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
break;
default:
dev_err(device_data->dev, "%s: Incorrect algorithm\n",
__func__);
return -EPERM;
}
/*
* MODE bit. This bit selects between HASH or HMAC mode for the
* selected algorithm. 0b0 = HASH and 0b1 = HMAC.
*/
if (HASH_OPER_MODE_HASH == config->oper_mode)
HASH_CLEAR_BITS(&device_data->base->cr,
HASH_CR_MODE_MASK);
else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
/* Truncate key to blocksize */
dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
HASH_SET_BITS(&device_data->base->cr,
HASH_CR_LKEY_MASK);
} else {
dev_dbg(device_data->dev, "%s: LKEY cleared\n",
__func__);
HASH_CLEAR_BITS(&device_data->base->cr,
HASH_CR_LKEY_MASK);
}
} else { /* Wrong hash mode */
ret = -EPERM;
dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
__func__);
}
return ret;
}
/**
* hash_begin - This routine resets some globals and initializes the hash
* hardware.
* @device_data: Structure for the hash device.
* @ctx: Hash context.
*/
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
/* HW and SW initializations */
/* Note: there is no need to initialize buffer and digest members */
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
/*
* INIT bit. Set this bit to 0b1 to reset the HASH processor core and
* prepare the HASH accelerator to compute the message
* digest of a new message.
*/
HASH_INITIALIZE;
/*
* NBLW bits. Reset the number of bits in last word (NBLW).
*/
HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
static int hash_process_data(struct hash_device_data *device_data,
struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
int msg_length, u8 *data_buffer, u8 *buffer,
u8 *index)
{
int ret = 0;
u32 count;
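/*
 * Accumulate data in the local block buffer until a full 512-bit block
 * is available, then feed it to the hardware. The hardware state is
 * resumed before and saved after each block, since the device is
 * shared between contexts.
 */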
do {
if ((*index + msg_length) < HASH_BLOCK_SIZE) {
for (count = 0; count < msg_length; count++) {
buffer[*index + count] =
*(data_buffer + count);
}
*index += msg_length;
msg_length = 0;
} else {
if (req_ctx->updated) {
ret = hash_resume_state(device_data,
&device_data->state);
/* The state buffer holds a full block: copy HASH_BLOCK_SIZE bytes. */
memmove(req_ctx->state.buffer,
device_data->state.buffer,
HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev,
"%s: hash_resume_state() failed!\n",
__func__);
goto out;
}
} else {
ret = init_hash_hw(device_data, ctx);
if (ret) {
dev_err(device_data->dev,
"%s: init_hash_hw() failed!\n",
__func__);
goto out;
}
req_ctx->updated = 1;
}
/*
* If 'data_buffer' is four byte aligned and
* local buffer does not have any data, we can
* write data directly from 'data_buffer' to
* HW peripheral, otherwise we first copy data
* to a local buffer
*/
if ((0 == (((u32)data_buffer) % 4)) &&
(0 == *index))
hash_processblock(device_data,
(const u32 *)data_buffer,
HASH_BLOCK_SIZE);
else {
for (count = 0;
count < (u32)(HASH_BLOCK_SIZE - *index);
count++) {
buffer[*index + count] =
*(data_buffer + count);
}
hash_processblock(device_data,
(const u32 *)buffer,
HASH_BLOCK_SIZE);
}
hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
data_buffer += (HASH_BLOCK_SIZE - *index);
msg_length -= (HASH_BLOCK_SIZE - *index);
*index = 0;
ret = hash_save_state(device_data,
&device_data->state);
memmove(device_data->state.buffer,
req_ctx->state.buffer,
HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
__func__);
goto out;
}
}
} while (msg_length != 0);
out:
return ret;
}
/**
* hash_dma_final - The hash dma final function for SHA1/SHA256.
* @req: The hash request for the job.
*/
static int hash_dma_final(struct ahash_request *req)
{
int ret = 0;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
struct hash_device_data *device_data;
u8 digest[SHA256_DIGEST_SIZE];
int bytes_written = 0;
ret = hash_get_device_data(ctx, &device_data);
if (ret)
return ret;
dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
if (req_ctx->updated) {
ret = hash_resume_state(device_data, &device_data->state);
if (ret) {
dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
__func__);
goto out;
}
}
if (!req_ctx->updated) {
ret = hash_setconfiguration(device_data, &ctx->config);
if (ret) {
dev_err(device_data->dev,
"%s: hash_setconfiguration() failed!\n",
__func__);
goto out;
}
/* Enable or disable DMA input depending on the configured mode */
if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
HASH_CLEAR_BITS(&device_data->base->cr,
HASH_CR_DMAE_MASK);
} else {
HASH_SET_BITS(&device_data->base->cr,
HASH_CR_DMAE_MASK);
HASH_SET_BITS(&device_data->base->cr,
HASH_CR_PRIVN_MASK);
}
HASH_INITIALIZE;
if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
hash_hw_write_key(device_data, ctx->key, ctx->keylen);
/* Number of bits in last word = (nbytes * 8) % 32 */
HASH_SET_NBLW((req->nbytes * 8) % 32);
req_ctx->updated = 1;
}
/* Store the nents in the dma struct. */
ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
if (!ctx->device->dma.nents) {
dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
__func__);
/* Return a real error code instead of the zero nents value. */
ret = -EFAULT;
goto out;
}
bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
if (bytes_written != req->nbytes) {
dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
__func__);
ret = bytes_written;
goto out;
}
wait_for_completion(&ctx->device->dma.complete);
hash_dma_done(ctx);
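/* Wait until the hardware has finished digesting the DMA-fed data. */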
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
unsigned int keylen = ctx->keylen;
u8 *key = ctx->key;
dev_dbg(device_data->dev, "%s: keylen: %d\n",
__func__, ctx->keylen);
hash_hw_write_key(device_data, key, keylen);
}
hash_get_digest(device_data, digest, ctx->config.algorithm);
memcpy(req->result, digest, ctx->digestsize);
out:
release_hash_device(device_data);
/**
* Allocated in setkey, and only used in HMAC.
*/
kfree(ctx->key);
return ret;
}
/**
* hash_hw_final - The final hash calculation function
* @req: The hash request for the job.
*/
static int hash_hw_final(struct ahash_request *req)
{
int ret = 0;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
struct hash_device_data *device_data;
u8 digest[SHA256_DIGEST_SIZE];
ret = hash_get_device_data(ctx, &device_data);
if (ret)
return ret;
dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
if (req_ctx->updated) {
ret = hash_resume_state(device_data, &device_data->state);
if (ret) {
dev_err(device_data->dev,
"%s: hash_resume_state() failed!\n", __func__);
goto out;
}
} else if (req->nbytes == 0 && ctx->keylen == 0) {
u8 zero_hash[SHA256_DIGEST_SIZE];
u32 zero_hash_size = 0;
bool zero_digest = false;
/**
* Use a pre-calculated empty message digest
* (workaround since hw return zeroes, hw bug!?)
*/
ret = get_empty_message_digest(device_data, &zero_hash[0],
&zero_hash_size, &zero_digest);
if (!ret && likely(zero_hash_size == ctx->digestsize) &&
zero_digest) {
memcpy(req->result, &zero_hash[0], ctx->digestsize);
goto out;
} else if (!ret && !zero_digest) {
dev_dbg(device_data->dev,
"%s: HMAC zero msg with key, continue...\n",
__func__);
} else {
dev_err(device_data->dev,
"%s: ret=%d, or wrong digest size? %s\n",
__func__, ret,
zero_hash_size == ctx->digestsize ?
"true" : "false");
/* Return error */
goto out;
}
} else if (req->nbytes == 0 && ctx->keylen > 0) {
dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
__func__);
goto out;
}
if (!req_ctx->updated) {
ret = init_hash_hw(device_data, ctx);
if (ret) {
dev_err(device_data->dev,
"%s: init_hash_hw() failed!\n", __func__);
goto out;
}
}
if (req_ctx->state.index) {
hash_messagepad(device_data, req_ctx->state.buffer,
req_ctx->state.index);
} else {
HASH_SET_DCAL;
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
unsigned int keylen = ctx->keylen;
u8 *key = ctx->key;
dev_dbg(device_data->dev, "%s: keylen: %d\n",
__func__, ctx->keylen);
hash_hw_write_key(device_data, key, keylen);
}
hash_get_digest(device_data, digest, ctx->config.algorithm);
memcpy(req->result, digest, ctx->digestsize);
out:
release_hash_device(device_data);
/**
* Allocated in setkey, and only used in HMAC.
*/
kfree(ctx->key);
return ret;
}
/**
* hash_hw_update - Updates current HASH computation hashing another part of
* the message.
* @req: The hash request containing the message to be hashed (caller
* allocated).
*/
int hash_hw_update(struct ahash_request *req)
{
int ret = 0;
u8 index = 0;
u8 *buffer;
struct hash_device_data *device_data;
u8 *data_buffer;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_hash_walk walk;
int msg_length = crypto_hash_walk_first(req, &walk);
/* Empty message ("") is correct indata */
if (msg_length == 0)
return ret;
index = req_ctx->state.index;
buffer = (u8 *)req_ctx->state.buffer;
/* Check if ctx->state.length + msg_length overflows */
if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
return -EPERM;
}
ret = hash_get_device_data(ctx, &device_data);
if (ret)
return ret;
/* Main loop */
while (0 != msg_length) {
data_buffer = walk.data;
ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
data_buffer, buffer, &index);
if (ret) {
dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
__func__);
goto out;
}
msg_length = crypto_hash_walk_done(&walk, 0);
}
req_ctx->state.index = index;
dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
__func__, req_ctx->state.index, req_ctx->state.bit_index);
out:
release_hash_device(device_data);
return ret;
}
/**
* hash_resume_state - Function that resumes the state of a calculation.
* @device_data: Pointer to the device structure.
* @device_state: The state to be restored in the hash hardware
*/
int hash_resume_state(struct hash_device_data *device_data,
const struct hash_state *device_state)
{
u32 temp_cr;
s32 count;
int hash_mode = HASH_OPER_MODE_HASH;
if (NULL == device_state) {
dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
__func__);
return -EPERM;
}
/* Check correctness of index and length members */
if (device_state->index > HASH_BLOCK_SIZE ||
(device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
__func__);
return -EPERM;
}
/*
* INIT bit. Set this bit to 0b1 to reset the HASH processor core and
* prepare the HASH accelerator to compute the message
* digest of a new message.
*/
HASH_INITIALIZE;
temp_cr = device_state->temp_cr;
writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
hash_mode = HASH_OPER_MODE_HMAC;
else
hash_mode = HASH_OPER_MODE_HASH;
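/*
 * Context state registers from index 36 onwards are only needed in
 * HMAC mode (key related state), so skip them for plain HASH.
 */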
for (count = 0; count < HASH_CSR_COUNT; count++) {
if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
break;
writel_relaxed(device_state->csr[count],
&device_data->base->csrx[count]);
}
writel_relaxed(device_state->csfull, &device_data->base->csfull);
writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
writel_relaxed(device_state->str_reg, &device_data->base->str);
writel_relaxed(temp_cr, &device_data->base->cr);
return 0;
}
/**
* hash_save_state - Function that saves the state of the hardware.
* @device_data: Pointer to the device structure.
* @device_state: The structure where the hardware state should be saved.
*/
int hash_save_state(struct hash_device_data *device_data,
struct hash_state *device_state)
{
u32 temp_cr;
u32 count;
int hash_mode = HASH_OPER_MODE_HASH;
if (NULL == device_state) {
dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
__func__);
return -ENOTSUPP;
}
/* Write dummy value to force digest intermediate calculation. This
* actually makes sure that there isn't any ongoing calculation in the
* hardware.
*/
while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
temp_cr = readl_relaxed(&device_data->base->cr);
device_state->str_reg = readl_relaxed(&device_data->base->str);
device_state->din_reg = readl_relaxed(&device_data->base->din);
if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
hash_mode = HASH_OPER_MODE_HMAC;
else
hash_mode = HASH_OPER_MODE_HASH;
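/* As in hash_resume_state(): CSRs from index 36 onwards are HMAC-only. */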
for (count = 0; count < HASH_CSR_COUNT; count++) {
if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
break;
device_state->csr[count] =
readl_relaxed(&device_data->base->csrx[count]);
}
device_state->csfull = readl_relaxed(&device_data->base->csfull);
device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
device_state->temp_cr = temp_cr;
return 0;
}
/**
* hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
* @device_data: Structure for the hash device.
*
*/
int hash_check_hw(struct hash_device_data *device_data)
{
/* Checking Peripheral Ids */
if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
return 0;
}
dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
return -ENOTSUPP;
}
/**
* hash_get_digest - Gets the digest.
* @device_data: Pointer to the device structure.
* @digest: User allocated byte array for the calculated digest.
* @algorithm: The algorithm in use.
*/
void hash_get_digest(struct hash_device_data *device_data,
u8 *digest, int algorithm)
{
u32 temp_hx_val, count;
int loop_ctr;
if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
__func__, algorithm);
return;
}
if (algorithm == HASH_ALGO_SHA1)
loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
else
loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
__func__, (u32) digest);
/* Copy result into digest array */
for (count = 0; count < loop_ctr; count++) {
temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
}
}
/**
* ahash_update - The hash update function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_update(struct ahash_request *req)
{
int ret = 0;
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
ret = hash_hw_update(req);
/* Skip update for DMA, all data will be passed to DMA in final */
if (ret) {
pr_err("%s: hash_hw_update() failed!\n", __func__);
}
return ret;
}
/**
* ahash_final - The hash final function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_final(struct ahash_request *req)
{
int ret = 0;
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
pr_debug("%s: data size: %d\n", __func__, req->nbytes);
if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
ret = hash_dma_final(req);
else
ret = hash_hw_final(req);
if (ret) {
pr_err("%s: hash_hw/dma_final() failed\n", __func__);
}
return ret;
}
static int hash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen, int alg)
{
int ret = 0;
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
/**
* Freed in final.
*/
ctx->key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key) {
pr_err("%s: Failed to allocate ctx->key for %d\n",
__func__, alg);
return -ENOMEM;
}
ctx->keylen = keylen;
return ret;
}
static int ahash_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA1_DIGEST_SIZE;
return hash_init(req);
}
static int ahash_sha256_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA256;
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA256_DIGEST_SIZE;
return hash_init(req);
}
static int ahash_sha1_digest(struct ahash_request *req)
{
int ret2, ret1;
ret1 = ahash_sha1_init(req);
if (ret1)
goto out;
ret1 = ahash_update(req);
ret2 = ahash_final(req);
out:
return ret1 ? ret1 : ret2;
}
static int ahash_sha256_digest(struct ahash_request *req)
{
int ret2, ret1;
ret1 = ahash_sha256_init(req);
if (ret1)
goto out;
ret1 = ahash_update(req);
ret2 = ahash_final(req);
out:
return ret1 ? ret1 : ret2;
}
static int hmac_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA1;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA1_DIGEST_SIZE;
return hash_init(req);
}
static int hmac_sha256_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = HASH_ALGO_SHA256;
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA256_DIGEST_SIZE;
return hash_init(req);
}
static int hmac_sha1_digest(struct ahash_request *req)
{
int ret2, ret1;
ret1 = hmac_sha1_init(req);
if (ret1)
goto out;
ret1 = ahash_update(req);
ret2 = ahash_final(req);
out:
return ret1 ? ret1 : ret2;
}
static int hmac_sha256_digest(struct ahash_request *req)
{
int ret2, ret1;
ret1 = hmac_sha256_init(req);
if (ret1)
goto out;
ret1 = ahash_update(req);
ret2 = ahash_final(req);
out:
return ret1 ? ret1 : ret2;
}
static int hmac_sha1_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}
static int hmac_sha256_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
struct hash_algo_template {
struct hash_config conf;
struct ahash_alg hash;
};
static int hash_cra_init(struct crypto_tfm *tfm)
{
struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
struct hash_algo_template *hash_alg;
hash_alg = container_of(__crypto_ahash_alg(alg),
struct hash_algo_template,
hash);
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct hash_req_ctx));
ctx->config.data_format = HASH_DATA_8_BITS;
ctx->config.algorithm = hash_alg->conf.algorithm;
ctx->config.oper_mode = hash_alg->conf.oper_mode;
ctx->digestsize = hash_alg->hash.halg.digestsize;
return 0;
}
static struct hash_algo_template hash_algs[] = {
{
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
.init = hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha1_digest,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ux500",
.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC),
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
}
},
{
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
.init = hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha256_digest,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ux500",
.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC),
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
}
},
{
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
.init = hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha1_digest,
.setkey = hmac_sha1_setkey,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-ux500",
.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC),
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
}
},
{
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
.init = hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha256_digest,
.setkey = hmac_sha256_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-ux500",
.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC),
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
}
}
};
/**
* ahash_algs_register_all - Registers all supported hash algorithms.
* @device_data: Structure for the hash device.
*/
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
int ret;
int i;
int count;
for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
ret = crypto_register_ahash(&hash_algs[i].hash);
if (ret) {
count = i;
dev_err(device_data->dev, "%s: alg registration failed\n",
hash_algs[i].hash.halg.base.cra_driver_name);
goto unreg;
}
}
return 0;
unreg:
for (i = 0; i < count; i++)
crypto_unregister_ahash(&hash_algs[i].hash);
return ret;
}
/**
* ahash_algs_unregister_all - Unregisters the previously registered hash
* algorithms.
* @device_data: Structure for the hash device.
*/
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
int i;
for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
* ux500_hash_probe - Function that probes the hash hardware.
* @pdev: The platform device.
*/
static int ux500_hash_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res = NULL;
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC);
if (!device_data) {
ret = -ENOMEM;
goto out;
}
device_data->dev = dev;
device_data->current_ctx = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
ret = -ENODEV;
goto out_kfree;
}
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (res == NULL) {
dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__);
ret = -EBUSY;
goto out_kfree;
}
device_data->phybase = res->start;
device_data->base = ioremap(res->start, resource_size(res));
if (!device_data->base) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
ret = -ENOMEM;
goto out_free_mem;
}
spin_lock_init(&device_data->ctx_lock);
spin_lock_init(&device_data->power_state_lock);
/* Enable power for HASH1 hardware block */
device_data->regulator = regulator_get(dev, "v-ape");
if (IS_ERR(device_data->regulator)) {
dev_err(dev, "%s: regulator_get() failed!\n", __func__);
ret = PTR_ERR(device_data->regulator);
device_data->regulator = NULL;
goto out_unmap;
}
/* Enable the clock for HASH1 hardware block */
device_data->clk = clk_get(dev, NULL);
if (IS_ERR(device_data->clk)) {
dev_err(dev, "%s: clk_get() failed!\n", __func__);
ret = PTR_ERR(device_data->clk);
goto out_regulator;
}
ret = clk_prepare(device_data->clk);
if (ret) {
dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
goto out_clk;
}
/* Enable device power (and clock) */
ret = hash_enable_power(device_data, false);
if (ret) {
dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
goto out_clk_unprepare;
}
ret = hash_check_hw(device_data);
if (ret) {
dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
goto out_power;
}
if (hash_mode == HASH_MODE_DMA)
hash_dma_setup_channel(device_data, dev);
platform_set_drvdata(pdev, device_data);
/* Put the new device into the device list... */
klist_add_tail(&device_data->list_node, &driver_data.device_list);
/* ... and signal that a new device is available. */
up(&driver_data.device_allocation);
ret = ahash_algs_register_all(device_data);
if (ret) {
dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
__func__);
goto out_power;
}
dev_info(dev, "successfully registered\n");
return 0;
out_power:
hash_disable_power(device_data, false);
out_clk_unprepare:
clk_unprepare(device_data->clk);
out_clk:
clk_put(device_data->clk);
out_regulator:
regulator_put(device_data->regulator);
out_unmap:
iounmap(device_data->base);
out_free_mem:
release_mem_region(res->start, resource_size(res));
out_kfree:
kfree(device_data);
out:
return ret;
}
/**
* ux500_hash_remove - Function that removes the hash device from the platform.
* @pdev: The platform device.
*/
static int ux500_hash_remove(struct platform_device *pdev)
{
struct resource *res;
struct hash_device_data *device_data;
struct device *dev = &pdev->dev;
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
return -ENOMEM;
}
/* Try to decrease the number of available devices. */
if (down_trylock(&driver_data.device_allocation))
return -EBUSY;
/* Check that the device is free */
spin_lock(&device_data->ctx_lock);
/* current_ctx allocates a device, NULL = unallocated */
if (device_data->current_ctx) {
/* The device is busy */
spin_unlock(&device_data->ctx_lock);
/* Return the device to the pool. */
up(&driver_data.device_allocation);
return -EBUSY;
}
spin_unlock(&device_data->ctx_lock);
/* Remove the device from the list */
if (klist_node_attached(&device_data->list_node))
klist_remove(&device_data->list_node);
/* If this was the last device, remove the services */
if (list_empty(&driver_data.device_list.k_list))
ahash_algs_unregister_all(device_data);
if (hash_disable_power(device_data, false))
dev_err(dev, "%s: hash_disable_power() failed\n",
__func__);
clk_unprepare(device_data->clk);
clk_put(device_data->clk);
regulator_put(device_data->regulator);
iounmap(device_data->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
kfree(device_data);
return 0;
}
/**
* ux500_hash_shutdown - Function that shuts down the hash device.
* @pdev: The platform device
*/
static void ux500_hash_shutdown(struct platform_device *pdev)
{
struct resource *res = NULL;
struct hash_device_data *device_data;
device_data = platform_get_drvdata(pdev);
if (!device_data) {
dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
__func__);
return;
}
/* Check that the device is free */
spin_lock(&device_data->ctx_lock);
/* current_ctx allocates a device, NULL = unallocated */
if (!device_data->current_ctx) {
if (down_trylock(&driver_data.device_allocation))
dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
__func__);
/**
* (Allocate the device)
* Need to set this to non-null (dummy) value,
* to avoid usage if context switching.
*/
device_data->current_ctx++;
}
spin_unlock(&device_data->ctx_lock);
/* Remove the device from the list */
if (klist_node_attached(&device_data->list_node))
klist_remove(&device_data->list_node);
/* If this was the last device, remove the services */
if (list_empty(&driver_data.device_list.k_list))
ahash_algs_unregister_all(device_data);
iounmap(device_data->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
if (hash_disable_power(device_data, false))
dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
__func__);
}
/**
* ux500_hash_suspend - Function that suspends the hash device.
* @dev: Device to suspend.
*/
static int ux500_hash_suspend(struct device *dev)
{
int ret;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
device_data = dev_get_drvdata(dev);
if (!device_data) {
dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
return -ENOMEM;
}
spin_lock(&device_data->ctx_lock);
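/*
 * If the device is idle, claim it by bumping the NULL current_ctx to a
 * dummy non-NULL value; the comparison against ++temp_ctx below
 * recognizes exactly this dummy value.
 */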
if (!device_data->current_ctx)
device_data->current_ctx++;
spin_unlock(&device_data->ctx_lock);
if (device_data->current_ctx == ++temp_ctx) {
if (down_interruptible(&driver_data.device_allocation))
dev_dbg(dev, "%s: down_interruptible() failed\n",
__func__);
ret = hash_disable_power(device_data, false);
} else {
ret = hash_disable_power(device_data, true);
}
if (ret)
dev_err(dev, "%s: hash_disable_power()\n", __func__);
return ret;
}
/**
* ux500_hash_resume - Function that resumes the hash device.
* @dev: Device to resume.
*/
static int ux500_hash_resume(struct device *dev)
{
int ret = 0;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
device_data = dev_get_drvdata(dev);
if (!device_data) {
dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
return -ENOMEM;
}
spin_lock(&device_data->ctx_lock);
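/*
 * A dummy (incremented NULL) current_ctx means the device was idle when
 * it was suspended; clear it and return the device to the pool instead
 * of restoring hardware state.
 */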
if (device_data->current_ctx == ++temp_ctx)
device_data->current_ctx = NULL;
spin_unlock(&device_data->ctx_lock);
if (!device_data->current_ctx)
up(&driver_data.device_allocation);
else
ret = hash_enable_power(device_data, true);
if (ret)
dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
return ret;
}
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
static const struct of_device_id ux500_hash_match[] = {
{ .compatible = "stericsson,ux500-hash" },
{ },
};
static struct platform_driver hash_driver = {
.probe = ux500_hash_probe,
.remove = ux500_hash_remove,
.shutdown = ux500_hash_shutdown,
.driver = {
.owner = THIS_MODULE,
.name = "hash1",
.of_match_table = ux500_hash_match,
.pm = &ux500_hash_pm,
}
};
/**
* ux500_hash_mod_init - The kernel module init function.
*/
static int __init ux500_hash_mod_init(void)
{
klist_init(&driver_data.device_list, NULL, NULL);
/* Initialize the semaphore to 0 devices (locked state) */
sema_init(&driver_data.device_allocation, 0);
return platform_driver_register(&hash_driver);
}
/**
* ux500_hash_mod_fini - The kernel module exit function.
*/
static void __exit ux500_hash_mod_fini(void)
{
platform_driver_unregister(&hash_driver);
}
module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");
| gpl-2.0 |
pocketbook-free/kernel_613 | drivers/gpu/drm/nouveau/nv04_graph.c | 765 | 30189 | /*
* Copyright 2007 Stephane Marchesin
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
static uint32_t nv04_graph_ctx_regs[] = {
0x0040053c,
0x00400544,
0x00400540,
0x00400548,
NV04_PGRAPH_CTX_SWITCH1,
NV04_PGRAPH_CTX_SWITCH2,
NV04_PGRAPH_CTX_SWITCH3,
NV04_PGRAPH_CTX_SWITCH4,
NV04_PGRAPH_CTX_CACHE1,
NV04_PGRAPH_CTX_CACHE2,
NV04_PGRAPH_CTX_CACHE3,
NV04_PGRAPH_CTX_CACHE4,
0x00400184,
0x004001a4,
0x004001c4,
0x004001e4,
0x00400188,
0x004001a8,
0x004001c8,
0x004001e8,
0x0040018c,
0x004001ac,
0x004001cc,
0x004001ec,
0x00400190,
0x004001b0,
0x004001d0,
0x004001f0,
0x00400194,
0x004001b4,
0x004001d4,
0x004001f4,
0x00400198,
0x004001b8,
0x004001d8,
0x004001f8,
0x0040019c,
0x004001bc,
0x004001dc,
0x004001fc,
0x00400174,
NV04_PGRAPH_DMA_START_0,
NV04_PGRAPH_DMA_START_1,
NV04_PGRAPH_DMA_LENGTH,
NV04_PGRAPH_DMA_MISC,
NV04_PGRAPH_DMA_PITCH,
NV04_PGRAPH_BOFFSET0,
NV04_PGRAPH_BBASE0,
NV04_PGRAPH_BLIMIT0,
NV04_PGRAPH_BOFFSET1,
NV04_PGRAPH_BBASE1,
NV04_PGRAPH_BLIMIT1,
NV04_PGRAPH_BOFFSET2,
NV04_PGRAPH_BBASE2,
NV04_PGRAPH_BLIMIT2,
NV04_PGRAPH_BOFFSET3,
NV04_PGRAPH_BBASE3,
NV04_PGRAPH_BLIMIT3,
NV04_PGRAPH_BOFFSET4,
NV04_PGRAPH_BBASE4,
NV04_PGRAPH_BLIMIT4,
NV04_PGRAPH_BOFFSET5,
NV04_PGRAPH_BBASE5,
NV04_PGRAPH_BLIMIT5,
NV04_PGRAPH_BPITCH0,
NV04_PGRAPH_BPITCH1,
NV04_PGRAPH_BPITCH2,
NV04_PGRAPH_BPITCH3,
NV04_PGRAPH_BPITCH4,
NV04_PGRAPH_SURFACE,
NV04_PGRAPH_STATE,
NV04_PGRAPH_BSWIZZLE2,
NV04_PGRAPH_BSWIZZLE5,
NV04_PGRAPH_BPIXEL,
NV04_PGRAPH_NOTIFY,
NV04_PGRAPH_PATT_COLOR0,
NV04_PGRAPH_PATT_COLOR1,
NV04_PGRAPH_PATT_COLORRAM+0x00,
NV04_PGRAPH_PATT_COLORRAM+0x04,
NV04_PGRAPH_PATT_COLORRAM+0x08,
NV04_PGRAPH_PATT_COLORRAM+0x0c,
NV04_PGRAPH_PATT_COLORRAM+0x10,
NV04_PGRAPH_PATT_COLORRAM+0x14,
NV04_PGRAPH_PATT_COLORRAM+0x18,
NV04_PGRAPH_PATT_COLORRAM+0x1c,
NV04_PGRAPH_PATT_COLORRAM+0x20,
NV04_PGRAPH_PATT_COLORRAM+0x24,
NV04_PGRAPH_PATT_COLORRAM+0x28,
NV04_PGRAPH_PATT_COLORRAM+0x2c,
NV04_PGRAPH_PATT_COLORRAM+0x30,
NV04_PGRAPH_PATT_COLORRAM+0x34,
NV04_PGRAPH_PATT_COLORRAM+0x38,
NV04_PGRAPH_PATT_COLORRAM+0x3c,
NV04_PGRAPH_PATT_COLORRAM+0x40,
NV04_PGRAPH_PATT_COLORRAM+0x44,
NV04_PGRAPH_PATT_COLORRAM+0x48,
NV04_PGRAPH_PATT_COLORRAM+0x4c,
NV04_PGRAPH_PATT_COLORRAM+0x50,
NV04_PGRAPH_PATT_COLORRAM+0x54,
NV04_PGRAPH_PATT_COLORRAM+0x58,
NV04_PGRAPH_PATT_COLORRAM+0x5c,
NV04_PGRAPH_PATT_COLORRAM+0x60,
NV04_PGRAPH_PATT_COLORRAM+0x64,
NV04_PGRAPH_PATT_COLORRAM+0x68,
NV04_PGRAPH_PATT_COLORRAM+0x6c,
NV04_PGRAPH_PATT_COLORRAM+0x70,
NV04_PGRAPH_PATT_COLORRAM+0x74,
NV04_PGRAPH_PATT_COLORRAM+0x78,
NV04_PGRAPH_PATT_COLORRAM+0x7c,
NV04_PGRAPH_PATT_COLORRAM+0x80,
NV04_PGRAPH_PATT_COLORRAM+0x84,
NV04_PGRAPH_PATT_COLORRAM+0x88,
NV04_PGRAPH_PATT_COLORRAM+0x8c,
NV04_PGRAPH_PATT_COLORRAM+0x90,
NV04_PGRAPH_PATT_COLORRAM+0x94,
NV04_PGRAPH_PATT_COLORRAM+0x98,
NV04_PGRAPH_PATT_COLORRAM+0x9c,
NV04_PGRAPH_PATT_COLORRAM+0xa0,
NV04_PGRAPH_PATT_COLORRAM+0xa4,
NV04_PGRAPH_PATT_COLORRAM+0xa8,
NV04_PGRAPH_PATT_COLORRAM+0xac,
NV04_PGRAPH_PATT_COLORRAM+0xb0,
NV04_PGRAPH_PATT_COLORRAM+0xb4,
NV04_PGRAPH_PATT_COLORRAM+0xb8,
NV04_PGRAPH_PATT_COLORRAM+0xbc,
NV04_PGRAPH_PATT_COLORRAM+0xc0,
NV04_PGRAPH_PATT_COLORRAM+0xc4,
NV04_PGRAPH_PATT_COLORRAM+0xc8,
NV04_PGRAPH_PATT_COLORRAM+0xcc,
NV04_PGRAPH_PATT_COLORRAM+0xd0,
NV04_PGRAPH_PATT_COLORRAM+0xd4,
NV04_PGRAPH_PATT_COLORRAM+0xd8,
NV04_PGRAPH_PATT_COLORRAM+0xdc,
NV04_PGRAPH_PATT_COLORRAM+0xe0,
NV04_PGRAPH_PATT_COLORRAM+0xe4,
NV04_PGRAPH_PATT_COLORRAM+0xe8,
NV04_PGRAPH_PATT_COLORRAM+0xec,
NV04_PGRAPH_PATT_COLORRAM+0xf0,
NV04_PGRAPH_PATT_COLORRAM+0xf4,
NV04_PGRAPH_PATT_COLORRAM+0xf8,
NV04_PGRAPH_PATT_COLORRAM+0xfc,
NV04_PGRAPH_PATTERN,
0x0040080c,
NV04_PGRAPH_PATTERN_SHAPE,
0x00400600,
NV04_PGRAPH_ROP3,
NV04_PGRAPH_CHROMA,
NV04_PGRAPH_BETA_AND,
NV04_PGRAPH_BETA_PREMULT,
NV04_PGRAPH_CONTROL0,
NV04_PGRAPH_CONTROL1,
NV04_PGRAPH_CONTROL2,
NV04_PGRAPH_BLEND,
NV04_PGRAPH_STORED_FMT,
NV04_PGRAPH_SOURCE_COLOR,
0x00400560,
0x00400568,
0x00400564,
0x0040056c,
0x00400400,
0x00400480,
0x00400404,
0x00400484,
0x00400408,
0x00400488,
0x0040040c,
0x0040048c,
0x00400410,
0x00400490,
0x00400414,
0x00400494,
0x00400418,
0x00400498,
0x0040041c,
0x0040049c,
0x00400420,
0x004004a0,
0x00400424,
0x004004a4,
0x00400428,
0x004004a8,
0x0040042c,
0x004004ac,
0x00400430,
0x004004b0,
0x00400434,
0x004004b4,
0x00400438,
0x004004b8,
0x0040043c,
0x004004bc,
0x00400440,
0x004004c0,
0x00400444,
0x004004c4,
0x00400448,
0x004004c8,
0x0040044c,
0x004004cc,
0x00400450,
0x004004d0,
0x00400454,
0x004004d4,
0x00400458,
0x004004d8,
0x0040045c,
0x004004dc,
0x00400460,
0x004004e0,
0x00400464,
0x004004e4,
0x00400468,
0x004004e8,
0x0040046c,
0x004004ec,
0x00400470,
0x004004f0,
0x00400474,
0x004004f4,
0x00400478,
0x004004f8,
0x0040047c,
0x004004fc,
0x00400534,
0x00400538,
0x00400514,
0x00400518,
0x0040051c,
0x00400520,
0x00400524,
0x00400528,
0x0040052c,
0x00400530,
0x00400d00,
0x00400d40,
0x00400d80,
0x00400d04,
0x00400d44,
0x00400d84,
0x00400d08,
0x00400d48,
0x00400d88,
0x00400d0c,
0x00400d4c,
0x00400d8c,
0x00400d10,
0x00400d50,
0x00400d90,
0x00400d14,
0x00400d54,
0x00400d94,
0x00400d18,
0x00400d58,
0x00400d98,
0x00400d1c,
0x00400d5c,
0x00400d9c,
0x00400d20,
0x00400d60,
0x00400da0,
0x00400d24,
0x00400d64,
0x00400da4,
0x00400d28,
0x00400d68,
0x00400da8,
0x00400d2c,
0x00400d6c,
0x00400dac,
0x00400d30,
0x00400d70,
0x00400db0,
0x00400d34,
0x00400d74,
0x00400db4,
0x00400d38,
0x00400d78,
0x00400db8,
0x00400d3c,
0x00400d7c,
0x00400dbc,
0x00400590,
0x00400594,
0x00400598,
0x0040059c,
0x004005a8,
0x004005ac,
0x004005b0,
0x004005b4,
0x004005c0,
0x004005c4,
0x004005c8,
0x004005cc,
0x004005d0,
0x004005d4,
0x004005d8,
0x004005dc,
0x004005e0,
NV04_PGRAPH_PASSTHRU_0,
NV04_PGRAPH_PASSTHRU_1,
NV04_PGRAPH_PASSTHRU_2,
NV04_PGRAPH_DVD_COLORFMT,
NV04_PGRAPH_SCALED_FORMAT,
NV04_PGRAPH_MISC24_0,
NV04_PGRAPH_MISC24_1,
NV04_PGRAPH_MISC24_2,
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
NV04_PGRAPH_VALID2,
NV04_PGRAPH_DEBUG_3
};
struct graph_state {
int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
struct nouveau_channel *
nv04_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int chid = dev_priv->engine.fifo.channels;
if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
return dev_priv->fifos[chid];
}
void
nv04_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_channel *chan = NULL;
int chid;
pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
pgraph->unload_context(dev);
/* Load context for next channel */
chid = dev_priv->engine.fifo.channel_id(dev);
chan = dev_priv->fifos[chid];
if (chan)
nv04_graph_load_context(chan);
pgraph->fifo_access(dev, true);
}
static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
if (nv04_graph_ctx_regs[i] == reg)
return &ctx->nv04[i];
}
return NULL;
}
int nv04_graph_create_context(struct nouveau_channel *chan)
{
struct graph_state *pgraph_ctx;
NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
GFP_KERNEL);
if (pgraph_ctx == NULL)
return -ENOMEM;
*ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
return 0;
}
void nv04_graph_destroy_context(struct nouveau_channel *chan)
{
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
}
int nv04_graph_load_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
uint32_t tmp;
int i;
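/*
 * Restore the register snapshot taken by nv04_graph_unload_context()
 * and bind this channel's id in CTX_USER.
 */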
for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
return 0;
}
int
nv04_graph_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_channel *chan = NULL;
struct graph_state *ctx;
uint32_t tmp;
int i;
chan = pgraph->channel(dev);
if (!chan)
return 0;
ctx = chan->pgraph_ctx;
for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
return 0;
}
int nv04_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
/* Enable PGRAPH interrupts */
nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
/*1231C000 blob, 001 haiku*/
/* V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); */
nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
/*0x72111100 blob , 01 haiku*/
/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
/*haiku same*/
/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
/*haiku and blob 10d4*/
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
return 0;
}
void nv04_graph_takedown(struct drm_device *dev)
{
}
void
nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
{
if (enabled)
nv_wr32(dev, NV04_PGRAPH_FIFO,
nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
else
nv_wr32(dev, NV04_PGRAPH_FIFO,
nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
}
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
chan->fence.last_sequence_irq = data;
nouveau_fence_handler(chan->dev, chan->id);
return 0;
}
/*
* Software methods, why they are needed, and how they all work:
*
* NV04 and NV05 keep most of the state in PGRAPH context itself, but some
* 2d engine settings are kept inside the grobjs themselves. The grobjs are
* 3 words long on both. grobj format on NV04 is:
*
* word 0:
* - bits 0-7: class
* - bit 12: color key active
* - bit 13: clip rect active
* - bit 14: if set, destination surface is swizzled and taken from buffer 5
* [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
* from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
* NV03_CONTEXT_SURFACE_DST].
* - bits 15-17: 2d operation [aka patch config]
* - bit 24: patch valid [enables rendering using this object]
* - bit 25: surf3d valid [for tex_tri and multitex_tri only]
* word 1:
* - bits 0-1: mono format
* - bits 8-13: color format
* - bits 16-31: DMA_NOTIFY instance
* word 2:
* - bits 0-15: DMA_A instance
* - bits 16-31: DMA_B instance
*
* On NV05 it's:
*
* word 0:
* - bits 0-7: class
* - bit 12: color key active
* - bit 13: clip rect active
* - bit 14: if set, destination surface is swizzled and taken from buffer 5
* [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
* from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
* NV03_CONTEXT_SURFACE_DST].
* - bits 15-17: 2d operation [aka patch config]
* - bits 20-22: dither mode
* - bit 24: patch valid [enables rendering using this object]
* - bit 25: surface_dst/surface_color/surf2d/surf3d valid
* - bit 26: surface_src/surface_zeta valid
* - bit 27: pattern valid
* - bit 28: rop valid
* - bit 29: beta1 valid
* - bit 30: beta4 valid
* word 1:
* - bits 0-1: mono format
* - bits 8-13: color format
* - bits 16-31: DMA_NOTIFY instance
* word 2:
* - bits 0-15: DMA_A instance
* - bits 16-31: DMA_B instance
*
* NV05 will set/unset the relevant valid bits when you poke the relevant
* object-binding methods with object of the proper type, or with the NULL
* type. It'll only allow rendering using the grobj if all needed objects
* are bound. The needed set of objects depends on selected operation: for
* example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
*
* NV04 doesn't have these methods implemented at all, and doesn't have the
* relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
* is set. So we have to emulate them in software, internally keeping the
* same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
* and the last word isn't actually used for anything, we abuse that word
* for this purpose.
*
* Actually, NV05 can optionally check bit 24 too, but we disable this since
* there's no use for it.
*
* For unknown reasons, NV04 implements surf3d binding in hardware as an
* exception. Also for unknown reasons, NV04 doesn't implement the clipping
* methods on the surf3d object, so we have to emulate them too.
*/
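/*
* Purely illustrative sketch (not part of the original driver and compiled
* out): decoding the grobj word 0 fields described above. The helper name
* is hypothetical.
*/
#if 0
static void nv04_example_decode_grobj_w0(uint32_t w0)
{
int class = w0 & 0xff; /* bits 0-7: class */
int op = (w0 >> 15) & 0x7; /* bits 15-17: 2d operation */
int patch_valid = (w0 >> 24) & 0x1; /* bit 24: patch valid */
int surf3d_valid = (w0 >> 25) & 0x1; /* bit 25: surf3d valid */
printk(KERN_DEBUG "grobj: class 0x%02x op %d patch %d surf3d %d\n",
class, op, patch_valid, surf3d_valid);
}
#endif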
static void
nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
uint32_t tmp;
tmp = nv_ri32(dev, instance);
tmp &= ~mask;
tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
static void
nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
uint32_t tmp, ctx1;
int class, op, valid = 1;
ctx1 = nv_ri32(dev, instance);
class = ctx1 & 0xff;
op = (ctx1 >> 15) & 7;
tmp = nv_ri32(dev, instance + 0xc);
tmp &= ~mask;
tmp |= value;
nv_wi32(dev, instance + 0xc, tmp);
/* check for valid surf2d/surf_dst/surf_color */
if (!(tmp & 0x02000000))
valid = 0;
/* check for valid surf_src/surf_zeta */
if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
valid = 0;
switch (op) {
/* SRCCOPY_AND, SRCCOPY: no extra objects required */
case 0:
case 3:
break;
/* ROP_AND: requires pattern and rop */
case 1:
if (!(tmp & 0x18000000))
valid = 0;
break;
/* BLEND_AND: requires beta1 */
case 2:
if (!(tmp & 0x20000000))
valid = 0;
break;
/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
case 4:
case 5:
if (!(tmp & 0x40000000))
valid = 0;
break;
}
nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
}
static int
nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
if (data > 5)
return 1;
/* Old versions of the objects only accept first three operations. */
if (data > 2 && grclass < 0x40)
return 1;
nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
nv04_graph_set_ctx_val(chan, 0, 0);
return 0;
}
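/*
* Worked example (illustrative): a SetOperation call with data == 1 (ROP_AND)
* stores 1 in ctx1 bits 15-17 above, and the following
* nv04_graph_set_ctx_val(chan, 0, 0) re-check only restores the patch-valid
* bit 24 if the 0x18000000 (pattern/rop bound) test passes for the new
* operation.
*/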
static int
nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
nv_wr32(chan->dev, 0x40053c, min);
nv_wr32(chan->dev, 0x400544, max);
return 0;
}
static int
nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
nv_wr32(chan->dev, 0x400540, min);
nv_wr32(chan->dev, 0x400548, max);
return 0;
}
static int
nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x42:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x42:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
case 0x52:
nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x08000000, 0);
return 0;
case 0x18:
nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x08000000, 0);
return 0;
case 0x44:
nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x10000000, 0);
return 0;
case 0x43:
nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x20000000, 0);
return 0;
case 0x12:
nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x40000000, 0);
return 0;
case 0x72:
nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x58:
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x04000000, 0);
return 0;
case 0x59:
nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x5a:
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x04000000, 0);
return 0;
case 0x5b:
nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x2000, 0);
return 0;
case 0x19:
nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x1000, 0);
return 0;
/* Yes, for some reason even the old versions of objects
* accept 0x57 and not 0x17. Consistency be damned.
*/
case 0x57:
nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
return 0;
}
return 1;
}
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
{ 0x0188, nv04_graph_mthd_bind_rop },
{ 0x018c, nv04_graph_mthd_bind_beta1 },
{ 0x0190, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_surf_dst },
{ 0x019c, nv04_graph_mthd_bind_surf_src },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_beta4 },
{ 0x019c, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
{ 0x0188, nv04_graph_mthd_bind_chroma },
{ 0x018c, nv04_graph_mthd_bind_clip },
{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
{ 0x0194, nv04_graph_mthd_bind_rop },
{ 0x0198, nv04_graph_mthd_bind_beta1 },
{ 0x019c, nv04_graph_mthd_bind_beta4 },
{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
{ 0x03e4, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x0304, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
{ 0x0304, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
{ 0x0184, nv04_graph_mthd_bind_clip },
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
{ 0x0184, nv04_graph_mthd_bind_clip },
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_surf_color },
{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{},
};
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
{ 0x0038, false, NULL }, /* dvd subpicture */
{ 0x0039, false, NULL }, /* m2mf */
{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
{ 0x0064, false, NULL }, /* nv05 iifc */
{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
{ 0x0065, false, NULL }, /* nv05 ifc */
{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
{ 0x0066, false, NULL }, /* nv05 sifc */
{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
{ 0x0018, false, NULL }, /* nv01 pattern */
{ 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
{ 0x0017, false, NULL }, /* nv01 chroma */
{ 0x0057, false, NULL }, /* nv04 chroma */
{ 0x0058, false, NULL }, /* surf_dst */
{ 0x0059, false, NULL }, /* surf_src */
{ 0x005a, false, NULL }, /* surf_color */
{ 0x005b, false, NULL }, /* surf_zeta */
{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
| gpl-2.0 |
mantera/WX_435_Kernel-CM7 | drivers/isdn/isdnloop/isdnloop.c | 765 | 39554 | /* $Id: isdnloop.c,v 1.11.6.7 2001/11/11 19:54:31 kai Exp $
*
* ISDN low-level module implementing a dummy loop driver.
*
* Copyright 1997 by Fritz Elfert (fritz@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "isdnloop.h"
static char *revision = "$Revision: 1.11.6.7 $";
static char *isdnloop_id = "loop0";
MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
MODULE_AUTHOR("Fritz Elfert");
MODULE_LICENSE("GPL");
module_param(isdnloop_id, charp, 0);
MODULE_PARM_DESC(isdnloop_id, "ID-String of first card");
static int isdnloop_addcard(char *);
/*
* Free queue completely.
*
* Parameter:
* card = pointer to card struct
* channel = channel number
*/
static void
isdnloop_free_queue(isdnloop_card * card, int channel)
{
struct sk_buff_head *queue = &card->bqueue[channel];
skb_queue_purge(queue);
card->sndcount[channel] = 0;
}
/*
* Send B-Channel data to another virtual card.
* This routine is called via timer-callback from isdnloop_pollbchan().
*
* Parameter:
* card = pointer to card struct.
* ch = channel number (0-based)
*/
static void
isdnloop_bchan_send(isdnloop_card * card, int ch)
{
isdnloop_card *rcard = card->rcard[ch];
int rch = card->rch[ch], len, ack;
struct sk_buff *skb;
isdn_ctrl cmd;
while (card->sndcount[ch]) {
if ((skb = skb_dequeue(&card->bqueue[ch]))) {
len = skb->len;
card->sndcount[ch] -= len;
ack = *(skb->head); /* used as scratch area */
cmd.driver = card->myid;
cmd.arg = ch;
if (rcard){
rcard->interface.rcvcallb_skb(rcard->myid, rch, skb);
} else {
printk(KERN_WARNING "isdnloop: no rcard, skb dropped\n");
dev_kfree_skb(skb);
};
cmd.command = ISDN_STAT_BSENT;
cmd.parm.length = len;
card->interface.statcallb(&cmd);
} else
card->sndcount[ch] = 0;
}
}
/*
* Send/Receive Data to/from the B-Channel.
* This routine is called via timer-callback.
* It schedules itself while any B-Channel is open.
*
* Parameter:
* data = pointer to card struct, set by kernel timer.data
*/
static void
isdnloop_pollbchan(unsigned long data)
{
isdnloop_card *card = (isdnloop_card *) data;
unsigned long flags;
if (card->flags & ISDNLOOP_FLAGS_B1ACTIVE)
isdnloop_bchan_send(card, 0);
if (card->flags & ISDNLOOP_FLAGS_B2ACTIVE)
isdnloop_bchan_send(card, 1);
if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE)) {
/* schedule b-channel polling again */
spin_lock_irqsave(&card->isdnloop_lock, flags);
card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
add_timer(&card->rb_timer);
card->flags |= ISDNLOOP_FLAGS_RBTIMER;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
} else
card->flags &= ~ISDNLOOP_FLAGS_RBTIMER;
}
/*
* Parse ICN-type setup string and fill fields of setup-struct
* with parsed data.
*
* Parameter:
* setup = setup string, format: [caller-id],si1,si2,[called-id]
* cmd = pointer to struct to be filled.
*/
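/*
* Illustrative example (hypothetical numbers): for the setup string
* "0123456,7,0,987654" the parser below yields
* cmd->parm.setup.phone = "0123456" (caller-id)
* cmd->parm.setup.si1 = 7
* cmd->parm.setup.si2 = 0
* cmd->parm.setup.eazmsn = "987654" (called-id)
*/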
static void
isdnloop_parse_setup(char *setup, isdn_ctrl * cmd)
{
char *t = setup;
char *s = strchr(t, ',');
*s++ = '\0';
strlcpy(cmd->parm.setup.phone, t, sizeof(cmd->parm.setup.phone));
s = strchr(t = s, ',');
*s++ = '\0';
if (!strlen(t))
cmd->parm.setup.si1 = 0;
else
cmd->parm.setup.si1 = simple_strtoul(t, NULL, 10);
s = strchr(t = s, ',');
*s++ = '\0';
if (!strlen(t))
cmd->parm.setup.si2 = 0;
else
cmd->parm.setup.si2 =
simple_strtoul(t, NULL, 10);
strlcpy(cmd->parm.setup.eazmsn, s, sizeof(cmd->parm.setup.eazmsn));
cmd->parm.setup.plan = 0;
cmd->parm.setup.screen = 0;
}
typedef struct isdnloop_stat {
char *statstr;
int command;
int action;
} isdnloop_stat;
/* *INDENT-OFF* */
static isdnloop_stat isdnloop_stat_table[] =
{
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
{"DCON_", ISDN_STAT_DCONN, 0}, /* D-Channel connected */
{"DDIS_", ISDN_STAT_DHUP, 0}, /* D-Channel disconnected */
{"DCAL_I", ISDN_STAT_ICALL, 3}, /* Incoming call dialup-line */
{"DSCA_I", ISDN_STAT_ICALL, 3}, /* Incoming call 1TR6-SPV */
{"FCALL", ISDN_STAT_ICALL, 4}, /* Leased line connection up */
{"CIF", ISDN_STAT_CINF, 5}, /* Charge-info, 1TR6-type */
{"AOC", ISDN_STAT_CINF, 6}, /* Charge-info, DSS1-type */
{"CAU", ISDN_STAT_CAUSE, 7}, /* Cause code */
{"TEI OK", ISDN_STAT_RUN, 0}, /* Card connected to wallplug */
{"E_L1: ACT FAIL", ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
{"E_L2: DATA LIN", ISDN_STAT_BHUP, 8}, /* Layer-2 data link lost */
{"E_L1: ACTIVATION FAILED",
ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
{NULL, 0, -1}
};
/* *INDENT-ON* */
/*
* Parse Status message-strings from virtual card.
* Depending on status, call statcallb for sending messages to upper
* levels. Also set/reset B-Channel active-flags.
*
* Parameter:
* status = status string to parse.
* channel = channel where message comes from.
* card = card where message comes from.
*/
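/*
* Illustrative example (hypothetical message): the status string
* "DCAL_I0123456,7,0,987654" matches the DCAL_I table entry above
* (ISDN_STAT_ICALL, action 3), and everything after the 6-character
* prefix is handed to isdnloop_parse_setup() before the callback is made.
*/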
static void
isdnloop_parse_status(u_char * status, int channel, isdnloop_card * card)
{
isdnloop_stat *s = isdnloop_stat_table;
int action = -1;
isdn_ctrl cmd;
while (s->statstr) {
if (!strncmp(status, s->statstr, strlen(s->statstr))) {
cmd.command = s->command;
action = s->action;
break;
}
s++;
}
if (action == -1)
return;
cmd.driver = card->myid;
cmd.arg = channel;
switch (action) {
case 1:
/* BCON_x */
card->flags |= (channel) ?
ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE;
break;
case 2:
/* BDIS_x */
card->flags &= ~((channel) ?
ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE);
isdnloop_free_queue(card, channel);
break;
case 3:
/* DCAL_I and DSCA_I */
isdnloop_parse_setup(status + 6, &cmd);
break;
case 4:
/* FCALL */
sprintf(cmd.parm.setup.phone, "LEASED%d", card->myid);
sprintf(cmd.parm.setup.eazmsn, "%d", channel + 1);
cmd.parm.setup.si1 = 7;
cmd.parm.setup.si2 = 0;
cmd.parm.setup.plan = 0;
cmd.parm.setup.screen = 0;
break;
case 5:
/* CIF */
strlcpy(cmd.parm.num, status + 3, sizeof(cmd.parm.num));
break;
case 6:
/* AOC */
snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%d",
(int) simple_strtoul(status + 7, NULL, 16));
break;
case 7:
/* CAU */
status += 3;
if (strlen(status) == 4)
snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%s%c%c",
status + 2, *status, *(status + 1));
else
strlcpy(cmd.parm.num, status + 1, sizeof(cmd.parm.num));
break;
case 8:
/* Misc Errors on L1 and L2 */
card->flags &= ~ISDNLOOP_FLAGS_B1ACTIVE;
isdnloop_free_queue(card, 0);
cmd.arg = 0;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_DHUP;
cmd.arg = 0;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_BHUP;
card->flags &= ~ISDNLOOP_FLAGS_B2ACTIVE;
isdnloop_free_queue(card, 1);
cmd.arg = 1;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
cmd.command = ISDN_STAT_DHUP;
cmd.arg = 1;
cmd.driver = card->myid;
break;
}
card->interface.statcallb(&cmd);
}
/*
* Store a character into the ringbuffer for reading from /dev/isdnctrl
*
* Parameter:
* card = pointer to card struct.
* c = char to store.
*/
static void
isdnloop_putmsg(isdnloop_card * card, unsigned char c)
{
ulong flags;
spin_lock_irqsave(&card->isdnloop_lock, flags);
*card->msg_buf_write++ = (c == 0xff) ? '\n' : c;
if (card->msg_buf_write == card->msg_buf_read) {
if (++card->msg_buf_read > card->msg_buf_end)
card->msg_buf_read = card->msg_buf;
}
if (card->msg_buf_write > card->msg_buf_end)
card->msg_buf_write = card->msg_buf;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/*
* Poll a virtual card's message queue.
* If there are new status-replies from the card, copy them to
* ringbuffer for reading on /dev/isdnctrl and call
* isdnloop_parse_status() for processing them. Watch for special
* Firmware bootmessage and parse it, to get the D-Channel protocol.
* If there are B-Channels open, initiate a timer-callback to
* isdnloop_pollbchan().
* This routine is called periodically via timer interrupt.
*
* Parameter:
* data = pointer to card struct
*/
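/*
* Illustrative example: the simulated boot message
* "DRV1.23EC-Q.931-CAPI-CNS-BASIS-20.02.96" queued by isdnloop_start()
* carries no "0x;" channel prefix, so it falls through to the "DRV1."
* check below and selects ISDN_PTYPE_EURO / ISDN_FEATURE_P_EURO.
*/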
static void
isdnloop_polldchan(unsigned long data)
{
isdnloop_card *card = (isdnloop_card *) data;
struct sk_buff *skb;
int avail;
int left;
u_char c;
int ch;
unsigned long flags;
u_char *p;
isdn_ctrl cmd;
if ((skb = skb_dequeue(&card->dqueue)))
avail = skb->len;
else
avail = 0;
for (left = avail; left > 0; left--) {
c = *skb->data;
skb_pull(skb, 1);
isdnloop_putmsg(card, c);
card->imsg[card->iptr] = c;
if (card->iptr < 59)
card->iptr++;
if (!skb->len) {
avail++;
isdnloop_putmsg(card, '\n');
card->imsg[card->iptr] = 0;
card->iptr = 0;
if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
card->imsg[1] <= '2' && card->imsg[2] == ';') {
ch = (card->imsg[1] - '0') - 1;
p = &card->imsg[3];
isdnloop_parse_status(p, ch, card);
} else {
p = card->imsg;
if (!strncmp(p, "DRV1.", 5)) {
printk(KERN_INFO "isdnloop: (%s) %s\n", CID, p);
if (!strncmp(p + 7, "TC", 2)) {
card->ptype = ISDN_PTYPE_1TR6;
card->interface.features |= ISDN_FEATURE_P_1TR6;
printk(KERN_INFO
"isdnloop: (%s) 1TR6-Protocol loaded and running\n", CID);
}
if (!strncmp(p + 7, "EC", 2)) {
card->ptype = ISDN_PTYPE_EURO;
card->interface.features |= ISDN_FEATURE_P_EURO;
printk(KERN_INFO
"isdnloop: (%s) Euro-Protocol loaded and running\n", CID);
}
continue;
}
}
}
}
if (avail) {
cmd.command = ISDN_STAT_STAVAIL;
cmd.driver = card->myid;
cmd.arg = avail;
card->interface.statcallb(&cmd);
}
if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE))
if (!(card->flags & ISDNLOOP_FLAGS_RBTIMER)) {
/* schedule b-channel polling */
card->flags |= ISDNLOOP_FLAGS_RBTIMER;
spin_lock_irqsave(&card->isdnloop_lock, flags);
del_timer(&card->rb_timer);
card->rb_timer.function = isdnloop_pollbchan;
card->rb_timer.data = (unsigned long) card;
card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
add_timer(&card->rb_timer);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/* schedule again */
spin_lock_irqsave(&card->isdnloop_lock, flags);
card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
add_timer(&card->st_timer);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/*
* Append a packet to the transmit buffer-queue.
*
* Parameter:
* channel = Number of B-channel
* skb = packet to send.
* card = pointer to card-struct
* Return:
* Number of bytes transferred, -E??? on error
*/
static int
isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
{
int len = skb->len;
unsigned long flags;
struct sk_buff *nskb;
if (len > 4000) {
printk(KERN_WARNING
"isdnloop: Send packet too large\n");
return -EINVAL;
}
if (len) {
if (!(card->flags & ((channel) ? ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE)))
return 0;
if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE)
return 0;
spin_lock_irqsave(&card->isdnloop_lock, flags);
nskb = dev_alloc_skb(skb->len);
if (nskb) {
skb_copy_from_linear_data(skb,
skb_put(nskb, len), len);
skb_queue_tail(&card->bqueue[channel], nskb);
dev_kfree_skb(skb);
} else
len = 0;
card->sndcount[channel] += len;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
return len;
}
/*
* Read the messages from the card's ringbuffer
*
* Parameter:
* buf = pointer to buffer.
* len = number of bytes to read.
* card = pointer to card struct.
* Return:
* number of bytes actually transferred.
*/
static int
isdnloop_readstatus(u_char __user *buf, int len, isdnloop_card * card)
{
int count;
u_char __user *p;
for (p = buf, count = 0; count < len; p++, count++) {
if (card->msg_buf_read == card->msg_buf_write)
return count;
if (put_user(*card->msg_buf_read++, p))
return -EFAULT;
if (card->msg_buf_read > card->msg_buf_end)
card->msg_buf_read = card->msg_buf;
}
return count;
}
/*
* Simulate a card's response by appending it to the card's
* message queue.
*
* Parameter:
* card = pointer to card struct.
* s = pointer to message-string.
* ch = channel: 0 = generic messages, 1 and 2 = D-channel messages.
* Return:
* 0 on success, 1 on memory squeeze.
*/
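/*
* Illustrative example: isdnloop_fake(card, "TEI OK", 0) queues the
* 9-byte message "00;TEI OK" on card->dqueue, which the D-channel poll
* routine later parses like a real card response.
*/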
static int
isdnloop_fake(isdnloop_card * card, char *s, int ch)
{
struct sk_buff *skb;
int len = strlen(s) + ((ch >= 0) ? 3 : 0);
if (!(skb = dev_alloc_skb(len))) {
printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
return 1;
}
if (ch >= 0)
sprintf(skb_put(skb, 3), "%02d;", ch);
memcpy(skb_put(skb, strlen(s)), s, strlen(s));
skb_queue_tail(&card->dqueue, skb);
return 0;
}
/* *INDENT-OFF* */
static isdnloop_stat isdnloop_cmd_table[] =
{
{"BCON_R", 0, 1}, /* B-Channel connect */
{"BCON_I", 0, 17}, /* B-Channel connect ind */
{"BDIS_R", 0, 2}, /* B-Channel disconnect */
{"DDIS_R", 0, 3}, /* D-Channel disconnect */
{"DCON_R", 0, 16}, /* D-Channel connect */
{"DSCA_R", 0, 4}, /* Dial 1TR6-SPV */
{"DCAL_R", 0, 5}, /* Dial */
{"EAZC", 0, 6}, /* Clear EAZ listener */
{"EAZ", 0, 7}, /* Set EAZ listener */
{"SEEAZ", 0, 8}, /* Get EAZ listener */
{"MSN", 0, 9}, /* Set/Clear MSN listener */
{"MSALL", 0, 10}, /* Set multi MSN listeners */
{"SETSIL", 0, 11}, /* Set SI list */
{"SEESIL", 0, 12}, /* Get SI list */
{"SILC", 0, 13}, /* Clear SI list */
{"LOCK", 0, -1}, /* LOCK channel */
{"UNLOCK", 0, -1}, /* UNLOCK channel */
{"FV2ON", 1, 14}, /* Leased mode on */
{"FV2OFF", 1, 15}, /* Leased mode off */
{NULL, 0, -1}
};
/* *INDENT-ON* */
/*
* Simulate an error-response from a card.
*
* Parameter:
* card = pointer to card struct.
*/
static void
isdnloop_fake_err(isdnloop_card * card)
{
char buf[60];
sprintf(buf, "E%s", card->omsg);
isdnloop_fake(card, buf, -1);
isdnloop_fake(card, "NAK", -1);
}
static u_char ctable_eu[] =
{0x00, 0x11, 0x01, 0x12};
static u_char ctable_1t[] =
{0x00, 0x3b, 0x01, 0x3a};
/*
* Assemble a simplified cause message depending on the
* D-channel protocol used.
*
* Parameter:
* card = pointer to card struct.
* loc = location: 0 = local, 1 = remote.
* cau = cause: 1 = busy, 2 = nonexistent callerid, 3 = no user responding.
* Return:
* Pointer to buffer containing the assembled message.
*/
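/*
* Worked examples (illustrative): with loc = 1 and cau = 3 ("no user
* responding") this returns "E0412" for Euro/DSS1 (loc = 1 selects the
* 0x04 location code, ctable_eu[3] = 0x12) and "3A44" for 1TR6
* (ctable_1t[3] = 0x3a).
*/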
static char *
isdnloop_unicause(isdnloop_card * card, int loc, int cau)
{
static char buf[6];
switch (card->ptype) {
case ISDN_PTYPE_EURO:
sprintf(buf, "E%02X%02X", (loc) ? 4 : 2, ctable_eu[cau]);
break;
case ISDN_PTYPE_1TR6:
sprintf(buf, "%02X44", ctable_1t[cau]);
break;
default:
return ("0000");
}
return (buf);
}
/*
* Release a virtual connection. Called from timer interrupt when the
* called party did not respond.
*
* Parameter:
* card = pointer to card struct.
* ch = channel (0-based)
*/
static void
isdnloop_atimeout(isdnloop_card * card, int ch)
{
unsigned long flags;
char buf[60];
spin_lock_irqsave(&card->isdnloop_lock, flags);
if (card->rcard[ch]) {
isdnloop_fake(card->rcard[ch], "DDIS_I", card->rch[ch] + 1);
card->rcard[ch]->rcard[card->rch[ch]] = NULL;
card->rcard[ch] = NULL;
}
isdnloop_fake(card, "DDIS_I", ch + 1);
/* No user responding */
sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 3));
isdnloop_fake(card, buf, ch + 1);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/*
* Wrapper for isdnloop_atimeout().
*/
static void
isdnloop_atimeout0(unsigned long data)
{
isdnloop_card *card = (isdnloop_card *) data;
isdnloop_atimeout(card, 0);
}
/*
* Wrapper for isdnloop_atimeout().
*/
static void
isdnloop_atimeout1(unsigned long data)
{
isdnloop_card *card = (isdnloop_card *) data;
isdnloop_atimeout(card, 1);
}
/*
* Install a watchdog in case the called user does not respond.
*
* Parameter:
* card = pointer to card struct.
* ch = channel to watch for.
*/
static void
isdnloop_start_ctimer(isdnloop_card * card, int ch)
{
unsigned long flags;
spin_lock_irqsave(&card->isdnloop_lock, flags);
init_timer(&card->c_timer[ch]);
card->c_timer[ch].expires = jiffies + ISDNLOOP_TIMER_ALERTWAIT;
if (ch)
card->c_timer[ch].function = isdnloop_atimeout1;
else
card->c_timer[ch].function = isdnloop_atimeout0;
card->c_timer[ch].data = (unsigned long) card;
add_timer(&card->c_timer[ch]);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/*
* Kill a pending channel watchdog.
*
* Parameter:
* card = pointer to card struct.
* ch = channel (0-based).
*/
static void
isdnloop_kill_ctimer(isdnloop_card * card, int ch)
{
unsigned long flags;
spin_lock_irqsave(&card->isdnloop_lock, flags);
del_timer(&card->c_timer[ch]);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
static u_char si2bit[] =
{0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
static u_char bit2si[] =
{1, 5, 7};
/*
* Try finding a listener for an outgoing call.
*
* Parameter:
* card = pointer to calling card.
* p = pointer to ICN-type setup-string.
* lch = channel of calling card.
* cmd = pointer to struct to be filled when parsing setup.
* Return:
* 0 = found match, alerting should happen.
* 1 = found matching number but it is busy.
* 2 = no matching listener.
* 3 = found matching number but SI does not match.
*/
static int
isdnloop_try_call(isdnloop_card * card, char *p, int lch, isdn_ctrl * cmd)
{
isdnloop_card *cc = cards;
unsigned long flags;
int ch;
int num_match;
int i;
char *e;
char nbuf[32];
isdnloop_parse_setup(p, cmd);
while (cc) {
for (ch = 0; ch < 2; ch++) {
/* Exclude ourself */
if ((cc == card) && (ch == lch))
continue;
num_match = 0;
switch (cc->ptype) {
case ISDN_PTYPE_EURO:
for (i = 0; i < 3; i++)
if (!(strcmp(cc->s0num[i], cmd->parm.setup.phone)))
num_match = 1;
break;
case ISDN_PTYPE_1TR6:
e = cc->eazlist[ch];
while (*e) {
sprintf(nbuf, "%s%c", cc->s0num[0], *e);
if (!(strcmp(nbuf, cmd->parm.setup.phone)))
num_match = 1;
e++;
}
}
if (num_match) {
spin_lock_irqsave(&card->isdnloop_lock, flags);
/* channel idle? */
if (!(cc->rcard[ch])) {
/* Check SI */
if (!(si2bit[cmd->parm.setup.si1] & cc->sil[ch])) {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return 3;
}
/* ch is idle, si and number matches */
cc->rcard[ch] = card;
cc->rch[ch] = lch;
card->rcard[lch] = cc;
card->rch[lch] = ch;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return 0;
} else {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
/* num matches, but busy */
if (ch == 1)
return 1;
}
}
}
cc = cc->next;
}
return 2;
}
/*
* Depending on D-channel protocol and caller/called, modify
* phone number.
*
* Parameter:
* card = pointer to card struct.
* phone = pointer phone number.
* caller = flag: 1 = caller, 0 = called.
* Return:
* pointer to new phone number.
*/
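/*
* Illustrative example (hypothetical numbers): with card->s0num[0] = "0123456"
* under 1TR6, a caller EAZ "3" becomes "01234563", while for the called side
* only the last digit of the full number is returned.
*/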
static char *
isdnloop_vstphone(isdnloop_card * card, char *phone, int caller)
{
int i;
static char nphone[30];
if (!card) {
printk("BUG!!!\n");
return "";
}
switch (card->ptype) {
case ISDN_PTYPE_EURO:
if (caller) {
for (i = 0; i < 2; i++)
if (!(strcmp(card->s0num[i], phone)))
return (phone);
return (card->s0num[0]);
}
return (phone);
break;
case ISDN_PTYPE_1TR6:
if (caller) {
sprintf(nphone, "%s%c", card->s0num[0], phone[0]);
return (nphone);
} else
return (&phone[strlen(phone) - 1]);
break;
}
return "";
}
/*
* Parse an ICN-type command string sent to the 'card'.
* Perform misc. actions depending on the command.
*
* Parameter:
* card = pointer to card struct.
*/
static void
isdnloop_parse_cmd(isdnloop_card * card)
{
char *p = card->omsg;
isdn_ctrl cmd;
char buf[60];
isdnloop_stat *s = isdnloop_cmd_table;
int action = -1;
int i;
int ch;
if ((card->omsg[0] != '0') || (card->omsg[2] != ';')) {
isdnloop_fake_err(card);
return;
}
ch = card->omsg[1] - '0';
if ((ch < 0) || (ch > 2)) {
isdnloop_fake_err(card);
return;
}
p += 3;
while (s->statstr) {
if (!strncmp(p, s->statstr, strlen(s->statstr))) {
action = s->action;
if (s->command && (ch != 0)) {
isdnloop_fake_err(card);
return;
}
break;
}
s++;
}
if (action == -1)
return;
switch (action) {
case 1:
/* 0x;BCON_R */
if (card->rcard[ch - 1]) {
isdnloop_fake(card->rcard[ch - 1], "BCON_I",
card->rch[ch - 1] + 1);
isdnloop_fake(card, "BCON_C", ch);
}
break;
case 17:
/* 0x;BCON_I */
if (card->rcard[ch - 1]) {
isdnloop_fake(card->rcard[ch - 1], "BCON_C",
card->rch[ch - 1] + 1);
}
break;
case 2:
/* 0x;BDIS_R */
isdnloop_fake(card, "BDIS_C", ch);
if (card->rcard[ch - 1]) {
isdnloop_fake(card->rcard[ch - 1], "BDIS_I",
card->rch[ch - 1] + 1);
}
break;
case 16:
/* 0x;DCON_R */
isdnloop_kill_ctimer(card, ch - 1);
if (card->rcard[ch - 1]) {
isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]);
isdnloop_fake(card->rcard[ch - 1], "DCON_C",
card->rch[ch - 1] + 1);
isdnloop_fake(card, "DCON_C", ch);
}
break;
case 3:
/* 0x;DDIS_R */
isdnloop_kill_ctimer(card, ch - 1);
if (card->rcard[ch - 1]) {
isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]);
isdnloop_fake(card->rcard[ch - 1], "DDIS_I",
card->rch[ch - 1] + 1);
card->rcard[ch - 1] = NULL;
}
isdnloop_fake(card, "DDIS_C", ch);
break;
case 4:
/* 0x;DSCA_Rdd,yy,zz,oo */
if (card->ptype != ISDN_PTYPE_1TR6) {
isdnloop_fake_err(card);
return;
}
/* Fall through */
case 5:
/* 0x;DCAL_Rdd,yy,zz,oo */
p += 6;
switch (isdnloop_try_call(card, p, ch - 1, &cmd)) {
case 0:
/* Alerting */
sprintf(buf, "D%s_I%s,%02d,%02d,%s",
(action == 4) ? "SCA" : "CAL",
isdnloop_vstphone(card, cmd.parm.setup.eazmsn, 1),
cmd.parm.setup.si1,
cmd.parm.setup.si2,
isdnloop_vstphone(card->rcard[ch - 1],
cmd.parm.setup.phone, 0));
isdnloop_fake(card->rcard[ch - 1], buf, card->rch[ch - 1] + 1);
/* Fall through */
case 3:
/* si1 does not match, don't alert but start timer */
isdnloop_start_ctimer(card, ch - 1);
break;
case 1:
/* Remote busy */
isdnloop_fake(card, "DDIS_I", ch);
sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 1));
isdnloop_fake(card, buf, ch);
break;
case 2:
/* No such user */
isdnloop_fake(card, "DDIS_I", ch);
sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 2));
isdnloop_fake(card, buf, ch);
break;
}
break;
case 6:
/* 0x;EAZC */
card->eazlist[ch - 1][0] = '\0';
break;
case 7:
/* 0x;EAZ */
p += 3;
strcpy(card->eazlist[ch - 1], p);
break;
case 8:
/* 0x;SEEAZ */
sprintf(buf, "EAZ-LIST: %s", card->eazlist[ch - 1]);
isdnloop_fake(card, buf, ch + 1);
break;
case 9:
/* 0x;MSN */
break;
case 10:
/* 0x;MSNALL */
break;
case 11:
/* 0x;SETSIL */
p += 6;
i = 0;
while (strchr("0157", *p)) {
if (i)
card->sil[ch - 1] |= si2bit[*p - '0'];
i = (*p++ == '0');
}
if (*p)
isdnloop_fake_err(card);
break;
case 12:
/* 0x;SEESIL */
sprintf(buf, "SIN-LIST: ");
p = buf + 10;
for (i = 0; i < 3; i++)
if (card->sil[ch - 1] & (1 << i))
p += sprintf(p, "%02d", bit2si[i]);
isdnloop_fake(card, buf, ch + 1);
break;
case 13:
/* 0x;SILC */
card->sil[ch - 1] = 0;
break;
case 14:
/* 00;FV2ON */
break;
case 15:
/* 00;FV2OFF */
break;
}
}
/*
* Put command-strings into the command buffer of the 'card'. In reality,
* execute them right in place by calling isdnloop_parse_cmd(). Also copy
* every command to the read message ringbuffer, preceding it with a '>'.
* These messages can be read at /dev/isdnctrl.
*
* Parameter:
* buf = pointer to command buffer.
* len = length of buffer data.
* user = flag: 1 = called from userlevel, 0 = called from kernel.
* card = pointer to card struct.
* Return:
* number of bytes transferred (currently always equals len).
*/
static int
isdnloop_writecmd(const u_char * buf, int len, int user, isdnloop_card * card)
{
int xcount = 0;
int ocount = 1;
isdn_ctrl cmd;
while (len) {
int count = len;
u_char *p;
u_char msg[0x100];
if (count > 255)
count = 255;
if (user) {
if (copy_from_user(msg, buf, count))
return -EFAULT;
} else
memcpy(msg, buf, count);
isdnloop_putmsg(card, '>');
for (p = msg; count > 0; count--, p++) {
len--;
xcount++;
isdnloop_putmsg(card, *p);
card->omsg[card->optr] = *p;
if (*p == '\n') {
card->omsg[card->optr] = '\0';
card->optr = 0;
isdnloop_parse_cmd(card);
if (len) {
isdnloop_putmsg(card, '>');
ocount++;
}
} else {
if (card->optr < 59)
card->optr++;
}
ocount++;
}
}
cmd.command = ISDN_STAT_STAVAIL;
cmd.driver = card->myid;
cmd.arg = ocount;
card->interface.statcallb(&cmd);
return xcount;
}
/*
* Delete card's pending timers, send STOP to linklevel
*/
static void
isdnloop_stopcard(isdnloop_card * card)
{
unsigned long flags;
isdn_ctrl cmd;
spin_lock_irqsave(&card->isdnloop_lock, flags);
if (card->flags & ISDNLOOP_FLAGS_RUNNING) {
card->flags &= ~ISDNLOOP_FLAGS_RUNNING;
del_timer(&card->st_timer);
del_timer(&card->rb_timer);
del_timer(&card->c_timer[0]);
del_timer(&card->c_timer[1]);
cmd.command = ISDN_STAT_STOP;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
}
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
/*
* Stop all cards before unload.
*/
static void
isdnloop_stopallcards(void)
{
isdnloop_card *p = cards;
while (p) {
isdnloop_stopcard(p);
p = p->next;
}
}
/*
* Start a 'card'. Simulate card's boot message and set the phone
* number(s) of the virtual 'S0-Interface'. Install D-channel
* poll timer.
*
* Parameter:
* card = pointer to card struct.
* sdefp = pointer to struct holding ioctl parameters.
* Return:
* 0 on success, -E??? otherwise.
*/
static int
isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
{
unsigned long flags;
isdnloop_sdef sdef;
int i;
if (card->flags & ISDNLOOP_FLAGS_RUNNING)
return -EBUSY;
if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
return -EFAULT;
spin_lock_irqsave(&card->isdnloop_lock, flags);
switch (sdef.ptype) {
case ISDN_PTYPE_EURO:
if (isdnloop_fake(card, "DRV1.23EC-Q.931-CAPI-CNS-BASIS-20.02.96",
-1)) {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
card->sil[0] = card->sil[1] = 4;
if (isdnloop_fake(card, "TEI OK", 0)) {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
for (i = 0; i < 3; i++)
strcpy(card->s0num[i], sdef.num[i]);
break;
case ISDN_PTYPE_1TR6:
if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
-1)) {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
card->sil[0] = card->sil[1] = 4;
if (isdnloop_fake(card, "TEI OK", 0)) {
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return -ENOMEM;
}
strcpy(card->s0num[0], sdef.num[0]);
card->s0num[1][0] = '\0';
card->s0num[2][0] = '\0';
break;
default:
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
printk(KERN_WARNING "isdnloop: Illegal D-channel protocol %d\n",
sdef.ptype);
return -EINVAL;
}
init_timer(&card->st_timer);
card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
card->st_timer.function = isdnloop_polldchan;
card->st_timer.data = (unsigned long) card;
add_timer(&card->st_timer);
card->flags |= ISDNLOOP_FLAGS_RUNNING;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
return 0;
}
/*
* Main handler for commands sent by linklevel.
*/
static int
isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
{
ulong a;
int i;
char cbuf[60];
isdn_ctrl cmd;
isdnloop_cdef cdef;
switch (c->command) {
case ISDN_CMD_IOCTL:
memcpy(&a, c->parm.num, sizeof(ulong));
switch (c->arg) {
case ISDNLOOP_IOCTL_DEBUGVAR:
return (ulong) card;
case ISDNLOOP_IOCTL_STARTUP:
if (!access_ok(VERIFY_READ, (void *) a, sizeof(isdnloop_sdef)))
return -EFAULT;
return (isdnloop_start(card, (isdnloop_sdef *) a));
break;
case ISDNLOOP_IOCTL_ADDCARD:
if (copy_from_user((char *)&cdef,
(char *)a,
sizeof(cdef)))
return -EFAULT;
return (isdnloop_addcard(cdef.id1));
break;
case ISDNLOOP_IOCTL_LEASEDCFG:
if (a) {
if (!card->leased) {
card->leased = 1;
while (card->ptype == ISDN_PTYPE_UNKNOWN)
schedule_timeout_interruptible(10);
schedule_timeout_interruptible(10);
sprintf(cbuf, "00;FV2ON\n01;EAZ1\n02;EAZ2\n");
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
printk(KERN_INFO
"isdnloop: (%s) Leased-line mode enabled\n",
CID);
cmd.command = ISDN_STAT_RUN;
cmd.driver = card->myid;
cmd.arg = 0;
card->interface.statcallb(&cmd);
}
} else {
if (card->leased) {
card->leased = 0;
sprintf(cbuf, "00;FV2OFF\n");
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
printk(KERN_INFO
"isdnloop: (%s) Leased-line mode disabled\n",
CID);
cmd.command = ISDN_STAT_RUN;
cmd.driver = card->myid;
cmd.arg = 0;
card->interface.statcallb(&cmd);
}
}
return 0;
default:
return -EINVAL;
}
break;
case ISDN_CMD_DIAL:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
if ((c->arg & 255) < ISDNLOOP_BCH) {
char *p;
char dial[50];
char dcode[4];
a = c->arg;
p = c->parm.setup.phone;
if (*p == 's' || *p == 'S') {
/* Dial for SPV */
p++;
strcpy(dcode, "SCA");
} else
/* Normal Dial */
strcpy(dcode, "CAL");
strcpy(dial, p);
sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
dcode, dial, c->parm.setup.si1,
c->parm.setup.si2, c->parm.setup.eazmsn);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_ACCEPTD:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
cbuf[0] = 0;
switch (card->l2_proto[a - 1]) {
case ISDN_PROTO_L2_X75I:
sprintf(cbuf, "%02d;BX75\n", (int) a);
break;
#ifdef CONFIG_ISDN_X25
case ISDN_PROTO_L2_X25DTE:
sprintf(cbuf, "%02d;BX2T\n", (int) a);
break;
case ISDN_PROTO_L2_X25DCE:
sprintf(cbuf, "%02d;BX2C\n", (int) a);
break;
#endif
case ISDN_PROTO_L2_HDLC:
sprintf(cbuf, "%02d;BTRA\n", (int) a);
break;
}
if (strlen(cbuf))
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
sprintf(cbuf, "%02d;DCON_R\n", (int) a);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_ACCEPTB:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
switch (card->l2_proto[a - 1]) {
case ISDN_PROTO_L2_X75I:
sprintf(cbuf, "%02d;BCON_R,BX75\n", (int) a);
break;
#ifdef CONFIG_ISDN_X25
case ISDN_PROTO_L2_X25DTE:
sprintf(cbuf, "%02d;BCON_R,BX2T\n", (int) a);
break;
case ISDN_PROTO_L2_X25DCE:
sprintf(cbuf, "%02d;BCON_R,BX2C\n", (int) a);
break;
#endif
case ISDN_PROTO_L2_HDLC:
sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int) a);
break;
default:
sprintf(cbuf, "%02d;BCON_R\n", (int) a);
}
printk(KERN_DEBUG "isdnloop writecmd '%s'\n", cbuf);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_HANGUP:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int) a, (int) a);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_SETEAZ:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
if (card->ptype == ISDN_PTYPE_EURO) {
sprintf(cbuf, "%02d;MS%s%s\n", (int) a,
c->parm.num[0] ? "N" : "ALL", c->parm.num);
} else
sprintf(cbuf, "%02d;EAZ%s\n", (int) a,
c->parm.num[0] ? c->parm.num : (u_char *) "0123456789");
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_CLREAZ:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
if (card->ptype == ISDN_PTYPE_EURO)
sprintf(cbuf, "%02d;MSNC\n", (int) a);
else
sprintf(cbuf, "%02d;EAZC\n", (int) a);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
case ISDN_CMD_SETL2:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg & 255) < ISDNLOOP_BCH) {
a = c->arg;
switch (a >> 8) {
case ISDN_PROTO_L2_X75I:
sprintf(cbuf, "%02d;BX75\n", (int) (a & 255) + 1);
break;
#ifdef CONFIG_ISDN_X25
case ISDN_PROTO_L2_X25DTE:
sprintf(cbuf, "%02d;BX2T\n", (int) (a & 255) + 1);
break;
case ISDN_PROTO_L2_X25DCE:
sprintf(cbuf, "%02d;BX2C\n", (int) (a & 255) + 1);
break;
#endif
case ISDN_PROTO_L2_HDLC:
sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
break;
case ISDN_PROTO_L2_TRANS:
sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
break;
default:
return -EINVAL;
}
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
card->l2_proto[a & 255] = (a >> 8);
}
break;
case ISDN_CMD_SETL3:
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return 0;
default:
return -EINVAL;
}
return 0;
}
/*
* Find card with given driverId
*/
static inline isdnloop_card *
isdnloop_findcard(int driverid)
{
isdnloop_card *p = cards;
while (p) {
if (p->myid == driverid)
return p;
p = p->next;
}
return (isdnloop_card *) 0;
}
/*
* Wrapper functions for interface to linklevel
*/
static int
if_command(isdn_ctrl * c)
{
isdnloop_card *card = isdnloop_findcard(c->driver);
if (card)
return (isdnloop_command(c, card));
printk(KERN_ERR
"isdnloop: if_command called with invalid driverId!\n");
return -ENODEV;
}
static int
if_writecmd(const u_char __user *buf, int len, int id, int channel)
{
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_writecmd(buf, len, 1, card));
}
printk(KERN_ERR
"isdnloop: if_writecmd called with invalid driverId!\n");
return -ENODEV;
}
static int
if_readstatus(u_char __user *buf, int len, int id, int channel)
{
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_readstatus(buf, len, card));
}
printk(KERN_ERR
"isdnloop: if_readstatus called with invalid driverId!\n");
return -ENODEV;
}
static int
if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
{
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
/* ack request stored in skb scratch area */
*(skb->head) = ack;
return (isdnloop_sendbuf(channel, skb, card));
}
printk(KERN_ERR
"isdnloop: if_sendbuf called with invalid driverId!\n");
return -ENODEV;
}
/*
* Allocate a new card-struct, initialize it,
* link it into the cards-list and register it at linklevel.
*/
static isdnloop_card *
isdnloop_initcard(char *id)
{
isdnloop_card *card;
int i;
if (!(card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL))) {
printk(KERN_WARNING
"isdnloop: (%s) Could not allocate card-struct.\n", id);
return (isdnloop_card *) 0;
}
card->interface.owner = THIS_MODULE;
card->interface.channels = ISDNLOOP_BCH;
card->interface.hl_hdrlen = 1; /* scratch area for storing ack flag*/
card->interface.maxbufsize = 4000;
card->interface.command = if_command;
card->interface.writebuf_skb = if_sendbuf;
card->interface.writecmd = if_writecmd;
card->interface.readstat = if_readstatus;
card->interface.features = ISDN_FEATURE_L2_X75I |
#ifdef CONFIG_ISDN_X25
ISDN_FEATURE_L2_X25DTE |
ISDN_FEATURE_L2_X25DCE |
#endif
ISDN_FEATURE_L2_HDLC |
ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_P_UNKNOWN;
card->ptype = ISDN_PTYPE_UNKNOWN;
strlcpy(card->interface.id, id, sizeof(card->interface.id));
card->msg_buf_write = card->msg_buf;
card->msg_buf_read = card->msg_buf;
card->msg_buf_end = &card->msg_buf[sizeof(card->msg_buf) - 1];
for (i = 0; i < ISDNLOOP_BCH; i++) {
card->l2_proto[i] = ISDN_PROTO_L2_X75I;
skb_queue_head_init(&card->bqueue[i]);
}
skb_queue_head_init(&card->dqueue);
spin_lock_init(&card->isdnloop_lock);
card->next = cards;
cards = card;
if (!register_isdn(&card->interface)) {
cards = cards->next;
printk(KERN_WARNING
"isdnloop: Unable to register %s\n", id);
kfree(card);
return (isdnloop_card *) 0;
}
card->myid = card->interface.channels;
return card;
}
static int
isdnloop_addcard(char *id1)
{
isdnloop_card *card;
if (!(card = isdnloop_initcard(id1))) {
return -EIO;
}
printk(KERN_INFO
"isdnloop: (%s) virtual card added\n",
card->interface.id);
return 0;
}
static int __init
isdnloop_init(void)
{
char *p;
char rev[10];
if ((p = strchr(revision, ':'))) {
strcpy(rev, p + 1);
p = strchr(rev, '$');
*p = 0;
} else
strcpy(rev, " ??? ");
printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
return 0;
}
static void __exit
isdnloop_exit(void)
{
isdn_ctrl cmd;
isdnloop_card *card = cards;
isdnloop_card *last;
int i;
isdnloop_stopallcards();
while (card) {
cmd.command = ISDN_STAT_UNLOAD;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
for (i = 0; i < ISDNLOOP_BCH; i++)
isdnloop_free_queue(card, i);
card = card->next;
}
card = cards;
while (card) {
last = card;
skb_queue_purge(&card->dqueue);
card = card->next;
kfree(last);
}
printk(KERN_NOTICE "isdnloop-ISDN-driver unloaded\n");
}
module_init(isdnloop_init);
module_exit(isdnloop_exit);
| gpl-2.0 |
wenfengliaoshuzhai/linux | sound/usb/line6/driver.c | 1021 | 15733 | /*
* Line 6 Linux USB driver
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/initval.h>
#include "capture.h"
#include "driver.h"
#include "midi.h"
#include "playback.h"
#define DRIVER_AUTHOR "Markus Grabner <grabner@icg.tugraz.at>"
#define DRIVER_DESC "Line 6 USB Driver"
/*
This is Line 6's MIDI manufacturer ID.
*/
const unsigned char line6_midi_id[] = {
0x00, 0x01, 0x0c
};
EXPORT_SYMBOL_GPL(line6_midi_id);
/*
Code to request version of POD, Variax interface
(and maybe other devices).
*/
static const char line6_request_version[] = {
0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7
};
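/*
Note: this byte sequence matches the standard MIDI Universal Device Inquiry
(identity request) message, f0 7e 7f 06 01 f7, broadcast to all device IDs.
*/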
/*
Class for asynchronous messages.
*/
struct message {
struct usb_line6 *line6;
const char *buffer;
int size;
int done;
};
/*
Forward declarations.
*/
static void line6_data_received(struct urb *urb);
static int line6_send_raw_message_async_part(struct message *msg,
struct urb *urb);
/*
Start to listen on endpoint.
*/
static int line6_start_listen(struct usb_line6 *line6)
{
int err;
usb_fill_int_urb(line6->urb_listen, line6->usbdev,
usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r),
line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
line6_data_received, line6, line6->interval);
line6->urb_listen->actual_length = 0;
err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
return err;
}
/*
Stop listening on endpoint.
*/
static void line6_stop_listen(struct usb_line6 *line6)
{
usb_kill_urb(line6->urb_listen);
}
/*
Send raw message in pieces of wMaxPacketSize bytes.
*/
static int line6_send_raw_message(struct usb_line6 *line6, const char *buffer,
int size)
{
int i, done = 0;
for (i = 0; i < size; i += line6->max_packet_size) {
int partial;
const char *frag_buf = buffer + i;
int frag_size = min(line6->max_packet_size, size - i);
int retval;
retval = usb_interrupt_msg(line6->usbdev,
usb_sndintpipe(line6->usbdev,
line6->properties->ep_ctrl_w),
(char *)frag_buf, frag_size,
&partial, LINE6_TIMEOUT * HZ);
if (retval) {
dev_err(line6->ifcdev,
"usb_interrupt_msg failed (%d)\n", retval);
break;
}
done += frag_size;
}
return done;
}
/*
Notification of completion of asynchronous request transmission.
*/
static void line6_async_request_sent(struct urb *urb)
{
struct message *msg = (struct message *)urb->context;
if (msg->done >= msg->size) {
usb_free_urb(urb);
kfree(msg);
} else
line6_send_raw_message_async_part(msg, urb);
}
/*
Asynchronously send part of a raw message.
*/
static int line6_send_raw_message_async_part(struct message *msg,
struct urb *urb)
{
int retval;
struct usb_line6 *line6 = msg->line6;
int done = msg->done;
int bytes = min(msg->size - done, line6->max_packet_size);
usb_fill_int_urb(urb, line6->usbdev,
usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w),
(char *)msg->buffer + done, bytes,
line6_async_request_sent, msg, line6->interval);
msg->done += bytes;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval < 0) {
dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n",
__func__, retval);
usb_free_urb(urb);
kfree(msg);
return retval;
}
return 0;
}
/*
Setup and start timer.
*/
void line6_start_timer(struct timer_list *timer, unsigned long msecs,
void (*function)(unsigned long), unsigned long data)
{
setup_timer(timer, function, data);
mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL_GPL(line6_start_timer);
/*
Asynchronously send raw message.
*/
int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
int size)
{
struct message *msg;
struct urb *urb;
/* create message: */
msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
if (msg == NULL)
return -ENOMEM;
/* create URB: */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb == NULL) {
kfree(msg);
return -ENOMEM;
}
/* set message data: */
msg->line6 = line6;
msg->buffer = buffer;
msg->size = size;
msg->done = 0;
/* start sending: */
return line6_send_raw_message_async_part(msg, urb);
}
EXPORT_SYMBOL_GPL(line6_send_raw_message_async);
/*
Send asynchronous device version request.
*/
int line6_version_request_async(struct usb_line6 *line6)
{
char *buffer;
int retval;
buffer = kmemdup(line6_request_version,
sizeof(line6_request_version), GFP_ATOMIC);
if (buffer == NULL)
return -ENOMEM;
retval = line6_send_raw_message_async(line6, buffer,
sizeof(line6_request_version));
kfree(buffer);
return retval;
}
EXPORT_SYMBOL_GPL(line6_version_request_async);
/*
Send sysex message in pieces of wMaxPacketSize bytes.
*/
int line6_send_sysex_message(struct usb_line6 *line6, const char *buffer,
int size)
{
return line6_send_raw_message(line6, buffer,
size + SYSEX_EXTRA_SIZE) -
SYSEX_EXTRA_SIZE;
}
EXPORT_SYMBOL_GPL(line6_send_sysex_message);
/*
Allocate buffer for sysex message and prepare header.
@param code sysex message code
@param size number of bytes between code and sysex end
*/
char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2,
int size)
{
char *buffer = kmalloc(size + SYSEX_EXTRA_SIZE, GFP_ATOMIC);
if (!buffer)
return NULL;
buffer[0] = LINE6_SYSEX_BEGIN;
memcpy(buffer + 1, line6_midi_id, sizeof(line6_midi_id));
buffer[sizeof(line6_midi_id) + 1] = code1;
buffer[sizeof(line6_midi_id) + 2] = code2;
buffer[sizeof(line6_midi_id) + 3 + size] = LINE6_SYSEX_END;
return buffer;
}
EXPORT_SYMBOL_GPL(line6_alloc_sysex_buffer);
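/*
 * Illustrative sketch only (hypothetical, not part of the original driver):
 * send a one-byte sysex parameter using the two helpers above.  The command
 * codes 0x04/0x00 and the payload byte 0x42 are made-up example values.
 */
static int __maybe_unused line6_send_example_sysex(struct usb_line6 *line6)
{
	const int size = 1;	/* payload bytes between the code and the end */
	char *buf = line6_alloc_sysex_buffer(line6, 0x04, 0x00, size);
	int sent;

	if (!buf)
		return -ENOMEM;
	/* the payload starts right after the two command code bytes */
	buf[sizeof(line6_midi_id) + 3] = 0x42;
	sent = line6_send_sysex_message(line6, buf, size);
	kfree(buf);
	return sent == size ? 0 : -EIO;
}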
/*
Notification of data received from the Line 6 device.
*/
static void line6_data_received(struct urb *urb)
{
struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
struct midi_buffer *mb = &line6->line6midi->midibuf_in;
int done;
if (urb->status == -ESHUTDOWN)
return;
done =
line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
if (done < urb->actual_length) {
line6_midibuf_ignore(mb, done);
dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
done, urb->actual_length);
}
for (;;) {
done =
line6_midibuf_read(mb, line6->buffer_message,
LINE6_MESSAGE_MAXLEN);
if (done == 0)
break;
line6->message_length = done;
line6_midi_receive(line6, line6->buffer_message, done);
if (line6->process_message)
line6->process_message(line6);
}
line6_start_listen(line6);
}
#define LINE6_READ_WRITE_STATUS_DELAY 2 /* milliseconds */
#define LINE6_READ_WRITE_MAX_RETRIES 50
/*
Read data from device.
*/
int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
unsigned datalen)
{
struct usb_device *usbdev = line6->usbdev;
int ret;
unsigned char len;
unsigned count;
if (address > 0xffff || datalen > 0xff)
return -EINVAL;
/* issue the read request: */
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
(datalen << 8) | 0x21, address,
NULL, 0, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
return ret;
}
/* Wait for data length. We'll get 0xff until length arrives. */
for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
mdelay(LINE6_READ_WRITE_STATUS_DELAY);
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000, &len, 1,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receive length failed (error %d)\n", ret);
return ret;
}
if (len != 0xff)
break;
}
if (len == 0xff) {
dev_err(line6->ifcdev, "read failed after %d retries\n",
count);
return -EIO;
} else if (len != datalen) {
/* should be equal or something went wrong */
dev_err(line6->ifcdev,
"length mismatch (expected %d, got %d)\n",
(int)datalen, (int)len);
return -EIO;
}
/* receive the result: */
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0013, 0x0000, data, datalen,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(line6_read_data);
/*
Write data to device.
*/
int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
unsigned datalen)
{
struct usb_device *usbdev = line6->usbdev;
int ret;
unsigned char status;
int count;
if (address > 0xffff || datalen > 0xffff)
return -EINVAL;
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0022, address, data, datalen,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"write request failed (error %d)\n", ret);
return ret;
}
for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
mdelay(LINE6_READ_WRITE_STATUS_DELAY);
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000,
&status, 1, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receiving status failed (error %d)\n", ret);
return ret;
}
if (status != 0xff)
break;
}
if (status == 0xff) {
dev_err(line6->ifcdev, "write failed after %d retries\n",
count);
return -EIO;
} else if (status != 0) {
dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(line6_write_data);
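/*
 * Descriptive note added for clarity, summarizing the vendor protocol used
 * by line6_read_data() and line6_write_data() above:
 *
 *  1. a vendor control request 0x67 (OUT) posts the read or write request,
 *     with the address in wIndex (and, for writes, the payload as data);
 *  2. the host then polls request 0x67 (IN) with wValue 0x0012 every
 *     LINE6_READ_WRITE_STATUS_DELAY ms until the one-byte status/length is
 *     no longer 0xff, giving up after LINE6_READ_WRITE_MAX_RETRIES tries;
 *  3. for reads, request 0x67 (IN) with wValue 0x0013 finally fetches the
 *     requested data.
 */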
/*
Read Line 6 device serial number.
(POD, TonePort, GuitarPort)
*/
int line6_read_serial_number(struct usb_line6 *line6, u32 *serial_number)
{
return line6_read_data(line6, 0x80d0, serial_number,
sizeof(*serial_number));
}
EXPORT_SYMBOL_GPL(line6_read_serial_number);
/*
Card destructor.
*/
static void line6_destruct(struct snd_card *card)
{
struct usb_line6 *line6 = card->private_data;
struct usb_device *usbdev = line6->usbdev;
/* free buffer memory first: */
kfree(line6->buffer_message);
kfree(line6->buffer_listen);
/* then free URBs: */
usb_free_urb(line6->urb_listen);
/* decrement reference counters: */
usb_put_dev(usbdev);
}
/* get data from endpoint descriptor (see usb_maxpacket): */
static void line6_get_interval(struct usb_line6 *line6)
{
struct usb_device *usbdev = line6->usbdev;
struct usb_host_endpoint *ep;
unsigned pipe = usb_rcvintpipe(usbdev, line6->properties->ep_ctrl_r);
unsigned epnum = usb_pipeendpoint(pipe);
ep = usbdev->ep_in[epnum];
if (ep) {
line6->interval = ep->desc.bInterval;
line6->max_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
} else {
dev_err(line6->ifcdev,
"endpoint not available, using fallback values");
line6->interval = LINE6_FALLBACK_INTERVAL;
line6->max_packet_size = LINE6_FALLBACK_MAXPACKETSIZE;
}
}
static int line6_init_cap_control(struct usb_line6 *line6)
{
int ret;
/* initialize USB buffers: */
line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
if (!line6->buffer_listen)
return -ENOMEM;
line6->buffer_message = kmalloc(LINE6_MESSAGE_MAXLEN, GFP_KERNEL);
if (!line6->buffer_message)
return -ENOMEM;
line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL);
if (!line6->urb_listen)
return -ENOMEM;
ret = line6_start_listen(line6);
if (ret < 0) {
dev_err(line6->ifcdev, "cannot start listening: %d\n", ret);
return ret;
}
return 0;
}
/*
Probe USB device.
*/
int line6_probe(struct usb_interface *interface,
const struct usb_device_id *id,
const char *driver_name,
const struct line6_properties *properties,
int (*private_init)(struct usb_line6 *, const struct usb_device_id *id),
size_t data_size)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
struct snd_card *card;
struct usb_line6 *line6;
int interface_number;
int ret;
if (WARN_ON(data_size < sizeof(*line6)))
return -EINVAL;
/* we don't handle multiple configurations */
if (usbdev->descriptor.bNumConfigurations != 1)
return -ENODEV;
ret = snd_card_new(&interface->dev,
SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, data_size, &card);
if (ret < 0)
return ret;
/* store basic data: */
line6 = card->private_data;
line6->card = card;
line6->properties = properties;
line6->usbdev = usbdev;
line6->ifcdev = &interface->dev;
strcpy(card->id, properties->id);
strcpy(card->driver, driver_name);
strcpy(card->shortname, properties->name);
sprintf(card->longname, "Line 6 %s at USB %s", properties->name,
dev_name(line6->ifcdev));
card->private_free = line6_destruct;
usb_set_intfdata(interface, line6);
/* increment reference counters: */
usb_get_dev(usbdev);
/* initialize device info: */
dev_info(&interface->dev, "Line 6 %s found\n", properties->name);
/* query interface number */
interface_number = interface->cur_altsetting->desc.bInterfaceNumber;
ret = usb_set_interface(usbdev, interface_number,
properties->altsetting);
if (ret < 0) {
dev_err(&interface->dev, "set_interface failed\n");
goto error;
}
line6_get_interval(line6);
if (properties->capabilities & LINE6_CAP_CONTROL) {
ret = line6_init_cap_control(line6);
if (ret < 0)
goto error;
}
/* initialize device data based on device: */
ret = private_init(line6, id);
if (ret < 0)
goto error;
/* creation of additional special files should go here */
dev_info(&interface->dev, "Line 6 %s now attached\n",
properties->name);
return 0;
error:
if (line6->disconnect)
line6->disconnect(line6);
snd_card_free(card);
return ret;
}
EXPORT_SYMBOL_GPL(line6_probe);
/*
Line 6 device disconnected.
*/
void line6_disconnect(struct usb_interface *interface)
{
struct usb_line6 *line6 = usb_get_intfdata(interface);
struct usb_device *usbdev = interface_to_usbdev(interface);
if (!line6)
return;
if (WARN_ON(usbdev != line6->usbdev))
return;
if (line6->urb_listen != NULL)
line6_stop_listen(line6);
snd_card_disconnect(line6->card);
if (line6->line6pcm)
line6_pcm_disconnect(line6->line6pcm);
if (line6->disconnect)
line6->disconnect(line6);
dev_info(&interface->dev, "Line 6 %s now disconnected\n",
line6->properties->name);
/* make sure the device isn't destructed twice: */
usb_set_intfdata(interface, NULL);
snd_card_free_when_closed(line6->card);
}
EXPORT_SYMBOL_GPL(line6_disconnect);
#ifdef CONFIG_PM
/*
Suspend Line 6 device.
*/
int line6_suspend(struct usb_interface *interface, pm_message_t message)
{
struct usb_line6 *line6 = usb_get_intfdata(interface);
struct snd_line6_pcm *line6pcm = line6->line6pcm;
snd_power_change_state(line6->card, SNDRV_CTL_POWER_D3hot);
if (line6->properties->capabilities & LINE6_CAP_CONTROL)
line6_stop_listen(line6);
if (line6pcm != NULL) {
snd_pcm_suspend_all(line6pcm->pcm);
line6pcm->flags = 0;
}
return 0;
}
EXPORT_SYMBOL_GPL(line6_suspend);
/*
Resume Line 6 device.
*/
int line6_resume(struct usb_interface *interface)
{
struct usb_line6 *line6 = usb_get_intfdata(interface);
if (line6->properties->capabilities & LINE6_CAP_CONTROL)
line6_start_listen(line6);
snd_power_change_state(line6->card, SNDRV_CTL_POWER_D0);
return 0;
}
EXPORT_SYMBOL_GPL(line6_resume);
#endif /* CONFIG_PM */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
imoseyon/leanKernel-shamu | arch/arm/mach-shmobile/board-kzm9g-reference.c | 2045 | 3582 | /*
* KZM-A9-GT board support - Reference Device Tree Implementation
*
* Copyright (C) 2012 Horms Solutions Ltd.
*
* Based on board-kzm9g.c
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/input.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
static unsigned long pin_pullup_conf[] = {
PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
};
static const struct pinctrl_map kzm_pinctrl_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("e6826000.i2c", "pfc-sh73a0",
"i2c3_1", "i2c3"),
/* MMCIF */
PIN_MAP_MUX_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_data8_0", "mmc0"),
PIN_MAP_MUX_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_ctrl_0", "mmc0"),
PIN_MAP_CONFIGS_PIN_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"PORT279", pin_pullup_conf),
PIN_MAP_CONFIGS_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_data8_0", pin_pullup_conf),
/* SCIFA4 */
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_data", "scifa4"),
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_ctrl", "scifa4"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_data4", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_ctrl", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_cd", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_wp", "sdhi0"),
/* SDHI2 */
PIN_MAP_MUX_GROUP_DEFAULT("ee140000.sdhi", "pfc-sh73a0",
"sdhi2_data4", "sdhi2"),
PIN_MAP_MUX_GROUP_DEFAULT("ee140000.sdhi", "pfc-sh73a0",
"sdhi2_ctrl", "sdhi2"),
};
static void __init kzm_init(void)
{
sh73a0_add_standard_devices_dt();
pinctrl_register_mappings(kzm_pinctrl_map, ARRAY_SIZE(kzm_pinctrl_map));
sh73a0_pinmux_init();
/* enable SD */
gpio_request(GPIO_FN_SDHI0_VCCQ_MC0_ON, NULL);
gpio_request_one(15, GPIOF_OUT_INIT_HIGH, NULL); /* power */
gpio_request_one(14, GPIOF_OUT_INIT_HIGH, NULL); /* power */
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*8way */
l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
#endif
}
static const char *kzm9g_boards_compat_dt[] __initdata = {
"renesas,kzm9g-reference",
NULL,
};
DT_MACHINE_START(KZM9G_DT, "kzm9g-reference")
.smp = smp_ops(sh73a0_smp_ops),
.map_io = sh73a0_map_io,
.init_early = sh73a0_init_delay,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = irqchip_init,
.init_machine = kzm_init,
.init_time = shmobile_timer_init,
.dt_compat = kzm9g_boards_compat_dt,
MACHINE_END
| gpl-2.0 |
belalang-tempur/android_kernel_xiaomi_ferrari | drivers/net/wireless/zd1211rw/zd_usb.c | 2301 | 51714 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
* Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "zd_def.h"
#include "zd_mac.h"
#include "zd_usb.h"
static struct usb_device_id usb_ids[] = {
/* ZD1211 */
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0b05, 0x171b), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
{}
};
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip.");
MODULE_AUTHOR("Ulrich Kunitz");
MODULE_AUTHOR("Daniel Drake");
MODULE_VERSION("1.0");
MODULE_DEVICE_TABLE(usb, usb_ids);
#define FW_ZD1211_PREFIX "zd1211/zd1211_"
#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
unsigned int count);
/* USB device initialization */
static void int_urb_complete(struct urb *urb);
static int request_fw_file(
const struct firmware **fw, const char *name, struct device *device)
{
int r;
dev_dbg_f(device, "fw name %s\n", name);
r = request_firmware(fw, name, device);
if (r)
dev_err(device,
"Could not load firmware file %s. Error number %d\n",
name, r);
return r;
}
static inline u16 get_bcdDevice(const struct usb_device *udev)
{
return le16_to_cpu(udev->descriptor.bcdDevice);
}
enum upload_code_flags {
REBOOT = 1,
};
/* Ensures that MAX_TRANSFER_SIZE is even. */
#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1)
static int upload_code(struct usb_device *udev,
const u8 *data, size_t size, u16 code_offset, int flags)
{
u8 *p;
int r;
/* USB request blocks need "kmalloced" buffers.
*/
p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL);
if (!p) {
r = -ENOMEM;
goto error;
}
size &= ~1;
while (size > 0) {
size_t transfer_size = size <= MAX_TRANSFER_SIZE ?
size : MAX_TRANSFER_SIZE;
dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size);
memcpy(p, data, transfer_size);
r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_FIRMWARE_DOWNLOAD,
USB_DIR_OUT | USB_TYPE_VENDOR,
code_offset, 0, p, transfer_size, 1000 /* ms */);
if (r < 0) {
dev_err(&udev->dev,
"USB control request for firmware upload"
" failed. Error number %d\n", r);
goto error;
}
transfer_size = r & ~1;
size -= transfer_size;
data += transfer_size;
code_offset += transfer_size/sizeof(u16);
}
if (flags & REBOOT) {
u8 ret;
/* Use "DMA-aware" buffer. */
r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_FIRMWARE_CONFIRM,
USB_DIR_IN | USB_TYPE_VENDOR,
0, 0, p, sizeof(ret), 5000 /* ms */);
if (r != sizeof(ret)) {
dev_err(&udev->dev,
"control request firmeware confirmation failed."
" Return value %d\n", r);
if (r >= 0)
r = -ENODEV;
goto error;
}
ret = p[0];
if (ret & 0x80) {
dev_err(&udev->dev,
"Internal error while downloading."
" Firmware confirm return value %#04x\n",
(unsigned int)ret);
r = -ENODEV;
goto error;
}
dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n",
(unsigned int)ret);
}
r = 0;
error:
kfree(p);
return r;
}
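/*
 * Descriptive note added for clarity: upload_code() pushes the image in
 * chunks of at most MAX_TRANSFER_SIZE bytes using the vendor control request
 * USB_REQ_FIRMWARE_DOWNLOAD.  code_offset advances in 16-bit words, which is
 * why the transfer sizes are kept even.  With the REBOOT flag set, a final
 * USB_REQ_FIRMWARE_CONFIRM read is issued; a confirm byte with bit 7 set
 * means the device rejected the firmware.
 */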
static u16 get_word(const void *data, u16 offset)
{
const __le16 *p = data;
return le16_to_cpu(p[offset]);
}
static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size,
const char* postfix)
{
scnprintf(buffer, size, "%s%s",
usb->is_zd1211b ?
FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX,
postfix);
return buffer;
}
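/*
 * Note added for clarity: with the prefixes defined above, get_fw_name()
 * yields firmware file names such as "zd1211/zd1211_ub" or
 * "zd1211/zd1211b_uphr", matching the MODULE_FIRMWARE() entries below.
 */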
static int handle_version_mismatch(struct zd_usb *usb,
const struct firmware *ub_fw)
{
struct usb_device *udev = zd_usb_to_usbdev(usb);
const struct firmware *ur_fw = NULL;
int offset;
int r = 0;
char fw_name[128];
r = request_fw_file(&ur_fw,
get_fw_name(usb, fw_name, sizeof(fw_name), "ur"),
&udev->dev);
if (r)
goto error;
r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT);
if (r)
goto error;
offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16));
r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT);
/* At this point, the vendor driver downloads the whole firmware
* image, hacks around with version IDs, and uploads it again,
* completely overwriting the boot code. We do not do this here as
* it is not required on any tested devices, and it is suspected to
* cause problems. */
error:
release_firmware(ur_fw);
return r;
}
static int upload_firmware(struct zd_usb *usb)
{
int r;
u16 fw_bcdDevice;
u16 bcdDevice;
struct usb_device *udev = zd_usb_to_usbdev(usb);
const struct firmware *ub_fw = NULL;
const struct firmware *uph_fw = NULL;
char fw_name[128];
bcdDevice = get_bcdDevice(udev);
r = request_fw_file(&ub_fw,
get_fw_name(usb, fw_name, sizeof(fw_name), "ub"),
&udev->dev);
if (r)
goto error;
fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET);
if (fw_bcdDevice != bcdDevice) {
dev_info(&udev->dev,
"firmware version %#06x and device bootcode version "
"%#06x differ\n", fw_bcdDevice, bcdDevice);
if (bcdDevice <= 0x4313)
dev_warn(&udev->dev, "device has old bootcode, please "
"report success or failure\n");
r = handle_version_mismatch(usb, ub_fw);
if (r)
goto error;
} else {
dev_dbg_f(&udev->dev,
"firmware device id %#06x is equal to the "
"actual device id\n", fw_bcdDevice);
}
r = request_fw_file(&uph_fw,
get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"),
&udev->dev);
if (r)
goto error;
r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT);
if (r) {
dev_err(&udev->dev,
"Could not upload firmware code uph. Error number %d\n",
r);
}
/* FALL-THROUGH */
error:
release_firmware(ub_fw);
release_firmware(uph_fw);
return r;
}
MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ur");
MODULE_FIRMWARE(FW_ZD1211_PREFIX "ur");
MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ub");
MODULE_FIRMWARE(FW_ZD1211_PREFIX "ub");
MODULE_FIRMWARE(FW_ZD1211B_PREFIX "uphr");
MODULE_FIRMWARE(FW_ZD1211_PREFIX "uphr");
/* Read data from device address space using "firmware interface" which does
* not require firmware to be loaded. */
int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
{
int r;
struct usb_device *udev = zd_usb_to_usbdev(usb);
u8 *buf;
/* Use "DMA-aware" buffer. */
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0,
buf, len, 5000);
if (r < 0) {
dev_err(&udev->dev,
"read over firmware interface failed: %d\n", r);
goto exit;
} else if (r != len) {
dev_err(&udev->dev,
"incomplete read over firmware interface: %d/%d\n",
r, len);
r = -EIO;
goto exit;
}
r = 0;
memcpy(data, buf, len);
exit:
kfree(buf);
return r;
}
#define urb_dev(urb) (&(urb)->dev->dev)
static inline void handle_regs_int_override(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock(&intr->lock);
if (atomic_read(&intr->read_regs_enabled)) {
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs_int_overridden = 1;
complete(&intr->read_regs.completion);
}
spin_unlock(&intr->lock);
}
static inline void handle_regs_int(struct urb *urb)
{
struct zd_usb *usb = urb->context;
struct zd_usb_interrupt *intr = &usb->intr;
int len;
u16 int_num;
ZD_ASSERT(in_interrupt());
spin_lock(&intr->lock);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
USB_MAX_EP_INT_BUFFER);
spin_unlock(&mac->lock);
schedule_work(&mac->process_intr);
} else if (atomic_read(&intr->read_regs_enabled)) {
len = urb->actual_length;
intr->read_regs.length = urb->actual_length;
if (len > sizeof(intr->read_regs.buffer))
len = sizeof(intr->read_regs.buffer);
memcpy(intr->read_regs.buffer, urb->transfer_buffer, len);
/* Sometimes USB_INT_ID_REGS is not overridden, but comes after
* USB_INT_ID_RETRY_FAILED. Read-reg retry then gets this
* delayed USB_INT_ID_REGS, but leaves USB_INT_ID_REGS of
* retry unhandled. Next read-reg command then might catch
* this wrong USB_INT_ID_REGS. Fix by ignoring wrong reads.
*/
if (!check_read_regs(usb, intr->read_regs.req,
intr->read_regs.req_count))
goto out;
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs_int_overridden = 0;
complete(&intr->read_regs.completion);
goto out;
}
out:
spin_unlock(&intr->lock);
/* CR_INTERRUPT might override read_reg too. */
if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
handle_regs_int_override(urb);
}
static void int_urb_complete(struct urb *urb)
{
int r;
struct usb_int_header *hdr;
struct zd_usb *usb;
struct zd_usb_interrupt *intr;
switch (urb->status) {
case 0:
break;
case -ESHUTDOWN:
case -EINVAL:
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
if (urb->actual_length < sizeof(hdr)) {
dev_dbg_f(urb_dev(urb), "error: urb %p to small\n", urb);
goto resubmit;
}
hdr = urb->transfer_buffer;
if (hdr->type != USB_INT_TYPE) {
dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb);
goto resubmit;
}
/* USB_INT_ID_RETRY_FAILED triggered by tx-urb submit can override
* pending USB_INT_ID_REGS causing read command timeout.
*/
usb = urb->context;
intr = &usb->intr;
if (hdr->id != USB_INT_ID_REGS && atomic_read(&intr->read_regs_enabled))
handle_regs_int_override(urb);
switch (hdr->id) {
case USB_INT_ID_REGS:
handle_regs_int(urb);
break;
case USB_INT_ID_RETRY_FAILED:
zd_mac_tx_failed(urb);
break;
default:
dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
(unsigned int)hdr->id);
goto resubmit;
}
resubmit:
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
urb, r);
/* TODO: add worker to reset intr->urb */
}
return;
}
static inline int int_urb_interval(struct usb_device *udev)
{
switch (udev->speed) {
case USB_SPEED_HIGH:
return 4;
case USB_SPEED_LOW:
return 10;
case USB_SPEED_FULL:
default:
return 1;
}
}
static inline int usb_int_enabled(struct zd_usb *usb)
{
unsigned long flags;
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
spin_lock_irqsave(&intr->lock, flags);
urb = intr->urb;
spin_unlock_irqrestore(&intr->lock, flags);
return urb != NULL;
}
int zd_usb_enable_int(struct zd_usb *usb)
{
int r;
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
dev_dbg_f(zd_usb_dev(usb), "\n");
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
r = -ENOMEM;
goto out;
}
ZD_ASSERT(!irqs_disabled());
spin_lock_irq(&intr->lock);
if (intr->urb) {
spin_unlock_irq(&intr->lock);
r = 0;
goto error_free_urb;
}
intr->urb = urb;
spin_unlock_irq(&intr->lock);
r = -ENOMEM;
intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
GFP_KERNEL, &intr->buffer_dma);
if (!intr->buffer) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't allocate transfer_buffer\n");
goto error_set_urb_null;
}
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
intr->buffer, USB_MAX_EP_INT_BUFFER,
int_urb_complete, usb,
intr->interval);
urb->transfer_dma = intr->buffer_dma;
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
r = usb_submit_urb(urb, GFP_KERNEL);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"Couldn't submit urb. Error number %d\n", r);
goto error;
}
return 0;
error:
usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
intr->buffer, intr->buffer_dma);
error_set_urb_null:
spin_lock_irq(&intr->lock);
intr->urb = NULL;
spin_unlock_irq(&intr->lock);
error_free_urb:
usb_free_urb(urb);
out:
return r;
}
void zd_usb_disable_int(struct zd_usb *usb)
{
unsigned long flags;
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
void *buffer;
dma_addr_t buffer_dma;
spin_lock_irqsave(&intr->lock, flags);
urb = intr->urb;
if (!urb) {
spin_unlock_irqrestore(&intr->lock, flags);
return;
}
intr->urb = NULL;
buffer = intr->buffer;
buffer_dma = intr->buffer_dma;
intr->buffer = NULL;
spin_unlock_irqrestore(&intr->lock, flags);
usb_kill_urb(urb);
dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
usb_free_urb(urb);
if (buffer)
usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
buffer, buffer_dma);
}
static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
unsigned int length)
{
int i;
const struct rx_length_info *length_info;
if (length < sizeof(struct rx_length_info)) {
/* It's not a complete packet anyhow. */
dev_dbg_f(zd_usb_dev(usb), "invalid, small RX packet : %d\n",
length);
return;
}
length_info = (struct rx_length_info *)
(buffer + length - sizeof(struct rx_length_info));
/* It might be that three frames are merged into a single URB
* transaction. We have to check for the length info tag.
*
* While testing we discovered that length_info might be unaligned,
* because if USB transactions are merged, the last packet will not
* be padded. Unaligned access might also happen if the length_info
* structure is not present.
*/
if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG)
{
unsigned int l, k, n;
for (i = 0, l = 0;; i++) {
k = get_unaligned_le16(&length_info->length[i]);
if (k == 0)
return;
n = l+k;
if (n > length)
return;
zd_mac_rx(zd_usb_to_hw(usb), buffer+l, k);
if (i >= 2)
return;
l = (n+3) & ~3;
}
} else {
zd_mac_rx(zd_usb_to_hw(usb), buffer, length);
}
}
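/*
 * Descriptive note added for clarity: when several frames share one URB,
 * the frame boundaries are aligned to four bytes (the loop above advances
 * with l = (n + 3) & ~3; the last frame may be unpadded) and an
 * rx_length_info block carrying up to three per-frame lengths plus
 * RX_LENGTH_INFO_TAG sits at the very end of the transfer.  Without the
 * tag, the whole buffer is treated as a single frame and passed to
 * zd_mac_rx() directly.
 */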
static void rx_urb_complete(struct urb *urb)
{
int r;
struct zd_usb *usb;
struct zd_usb_rx *rx;
const u8 *buffer;
unsigned int length;
switch (urb->status) {
case 0:
break;
case -ESHUTDOWN:
case -EINVAL:
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
buffer = urb->transfer_buffer;
length = urb->actual_length;
usb = urb->context;
rx = &usb->rx;
tasklet_schedule(&rx->reset_timer_tasklet);
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
spin_lock(&rx->lock);
memcpy(rx->fragment, buffer, length);
rx->fragment_length = length;
spin_unlock(&rx->lock);
goto resubmit;
}
spin_lock(&rx->lock);
if (rx->fragment_length > 0) {
/* We are on a second fragment, we believe */
ZD_ASSERT(length + rx->fragment_length <=
ARRAY_SIZE(rx->fragment));
dev_dbg_f(urb_dev(urb), "*** second fragment ***\n");
memcpy(rx->fragment+rx->fragment_length, buffer, length);
handle_rx_packet(usb, rx->fragment,
rx->fragment_length + length);
rx->fragment_length = 0;
spin_unlock(&rx->lock);
} else {
spin_unlock(&rx->lock);
handle_rx_packet(usb, buffer, length);
}
resubmit:
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r)
dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
}
static struct urb *alloc_rx_urb(struct zd_usb *usb)
{
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct urb *urb;
void *buffer;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
&urb->transfer_dma);
if (!buffer) {
usb_free_urb(urb);
return NULL;
}
usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
buffer, USB_MAX_RX_SIZE,
rx_urb_complete, usb);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
return urb;
}
static void free_rx_urb(struct urb *urb)
{
if (!urb)
return;
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
}
static int __zd_usb_enable_rx(struct zd_usb *usb)
{
int i, r;
struct zd_usb_rx *rx = &usb->rx;
struct urb **urbs;
dev_dbg_f(zd_usb_dev(usb), "\n");
r = -ENOMEM;
urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL);
if (!urbs)
goto error;
for (i = 0; i < RX_URBS_COUNT; i++) {
urbs[i] = alloc_rx_urb(usb);
if (!urbs[i])
goto error;
}
ZD_ASSERT(!irqs_disabled());
spin_lock_irq(&rx->lock);
if (rx->urbs) {
spin_unlock_irq(&rx->lock);
r = 0;
goto error;
}
rx->urbs = urbs;
rx->urbs_count = RX_URBS_COUNT;
spin_unlock_irq(&rx->lock);
for (i = 0; i < RX_URBS_COUNT; i++) {
r = usb_submit_urb(urbs[i], GFP_KERNEL);
if (r)
goto error_submit;
}
return 0;
error_submit:
for (i = 0; i < RX_URBS_COUNT; i++) {
usb_kill_urb(urbs[i]);
}
spin_lock_irq(&rx->lock);
rx->urbs = NULL;
rx->urbs_count = 0;
spin_unlock_irq(&rx->lock);
error:
if (urbs) {
for (i = 0; i < RX_URBS_COUNT; i++)
free_rx_urb(urbs[i]);
}
return r;
}
int zd_usb_enable_rx(struct zd_usb *usb)
{
int r;
struct zd_usb_rx *rx = &usb->rx;
mutex_lock(&rx->setup_mutex);
r = __zd_usb_enable_rx(usb);
mutex_unlock(&rx->setup_mutex);
zd_usb_reset_rx_idle_timer(usb);
return r;
}
static void __zd_usb_disable_rx(struct zd_usb *usb)
{
int i;
unsigned long flags;
struct urb **urbs;
unsigned int count;
struct zd_usb_rx *rx = &usb->rx;
spin_lock_irqsave(&rx->lock, flags);
urbs = rx->urbs;
count = rx->urbs_count;
spin_unlock_irqrestore(&rx->lock, flags);
if (!urbs)
return;
for (i = 0; i < count; i++) {
usb_kill_urb(urbs[i]);
free_rx_urb(urbs[i]);
}
kfree(urbs);
spin_lock_irqsave(&rx->lock, flags);
rx->urbs = NULL;
rx->urbs_count = 0;
spin_unlock_irqrestore(&rx->lock, flags);
}
void zd_usb_disable_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
mutex_lock(&rx->setup_mutex);
__zd_usb_disable_rx(usb);
mutex_unlock(&rx->setup_mutex);
tasklet_kill(&rx->reset_timer_tasklet);
cancel_delayed_work_sync(&rx->idle_work);
}
static void zd_usb_reset_rx(struct zd_usb *usb)
{
bool do_reset;
struct zd_usb_rx *rx = &usb->rx;
unsigned long flags;
mutex_lock(&rx->setup_mutex);
spin_lock_irqsave(&rx->lock, flags);
do_reset = rx->urbs != NULL;
spin_unlock_irqrestore(&rx->lock, flags);
if (do_reset) {
__zd_usb_disable_rx(usb);
__zd_usb_enable_rx(usb);
}
mutex_unlock(&rx->setup_mutex);
if (do_reset)
zd_usb_reset_rx_idle_timer(usb);
}
/**
* zd_usb_disable_tx - disable transmission
* @usb: the zd1211rw-private USB structure
*
* Frees all URBs in the free list and marks the transmission as disabled.
*/
void zd_usb_disable_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
atomic_set(&tx->enabled, 0);
/* kill all submitted tx-urbs */
usb_kill_anchored_urbs(&tx->submitted);
spin_lock_irqsave(&tx->lock, flags);
WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
WARN_ON(tx->submitted_urbs != 0);
tx->submitted_urbs = 0;
spin_unlock_irqrestore(&tx->lock, flags);
/* The stopped state is ignored, relying on ieee80211_wake_queues()
* in a potentially following zd_usb_enable_tx().
*/
}
/**
* zd_usb_enable_tx - enables transmission
* @usb: a &struct zd_usb pointer
*
* This function enables transmission and prepares the &zd_usb_tx data
* structure.
*/
void zd_usb_enable_tx(struct zd_usb *usb)
{
unsigned long flags;
struct zd_usb_tx *tx = &usb->tx;
spin_lock_irqsave(&tx->lock, flags);
atomic_set(&tx->enabled, 1);
tx->submitted_urbs = 0;
ieee80211_wake_queues(zd_usb_to_hw(usb));
tx->stopped = 0;
spin_unlock_irqrestore(&tx->lock, flags);
}
static void tx_dec_submitted_urbs(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
spin_lock_irqsave(&tx->lock, flags);
--tx->submitted_urbs;
if (tx->stopped && tx->submitted_urbs <= ZD_USB_TX_LOW) {
ieee80211_wake_queues(zd_usb_to_hw(usb));
tx->stopped = 0;
}
spin_unlock_irqrestore(&tx->lock, flags);
}
static void tx_inc_submitted_urbs(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
spin_lock_irqsave(&tx->lock, flags);
++tx->submitted_urbs;
if (!tx->stopped && tx->submitted_urbs > ZD_USB_TX_HIGH) {
ieee80211_stop_queues(zd_usb_to_hw(usb));
tx->stopped = 1;
}
spin_unlock_irqrestore(&tx->lock, flags);
}
/**
* tx_urb_complete - completes the execution of an URB
* @urb: a URB
*
* This function is called if the URB has been transferred to a device or an
* error has happened.
*/
static void tx_urb_complete(struct urb *urb)
{
int r;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct zd_usb *usb;
struct zd_usb_tx *tx;
skb = (struct sk_buff *)urb->context;
info = IEEE80211_SKB_CB(skb);
/*
* grab 'usb' pointer before handing off the skb (since
* it might be freed by zd_mac_tx_to_dev or mac80211)
*/
usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
tx = &usb->tx;
switch (urb->status) {
case 0:
break;
case -ESHUTDOWN:
case -EINVAL:
case -ENODEV:
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
break;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
free_urb:
skb_unlink(skb, &usb->tx.submitted_skbs);
zd_mac_tx_to_dev(skb, urb->status);
usb_free_urb(urb);
tx_dec_submitted_urbs(usb);
return;
resubmit:
usb_anchor_urb(urb, &tx->submitted);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
usb_unanchor_urb(urb);
dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
goto free_urb;
}
}
/**
* zd_usb_tx: initiates transfer of a frame to the device
*
* @usb: the zd1211rw-private USB structure
* @skb: a &struct sk_buff pointer
*
* This function transmits a frame to the device. It doesn't wait for
* completion. The frame must contain the control set and have all the
* control set information available.
*
* The function returns 0 if the transfer has been successfully initiated.
*/
int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
{
int r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct urb *urb;
struct zd_usb_tx *tx = &usb->tx;
if (!atomic_read(&tx->enabled)) {
r = -ENOENT;
goto out;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
r = -ENOMEM;
goto out;
}
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
skb->data, skb->len, tx_urb_complete, skb);
info->rate_driver_data[1] = (void *)jiffies;
skb_queue_tail(&tx->submitted_skbs, skb);
usb_anchor_urb(urb, &tx->submitted);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
usb_unanchor_urb(urb);
skb_unlink(skb, &tx->submitted_skbs);
goto error;
}
tx_inc_submitted_urbs(usb);
return 0;
error:
usb_free_urb(urb);
out:
return r;
}
static bool zd_tx_timeout(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
struct sk_buff_head *q = &tx->submitted_skbs;
struct sk_buff *skb, *skbnext;
struct ieee80211_tx_info *info;
unsigned long flags, trans_start;
bool have_timedout = false;
spin_lock_irqsave(&q->lock, flags);
skb_queue_walk_safe(q, skb, skbnext) {
info = IEEE80211_SKB_CB(skb);
trans_start = (unsigned long)info->rate_driver_data[1];
if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
have_timedout = true;
break;
}
}
spin_unlock_irqrestore(&q->lock, flags);
return have_timedout;
}
static void zd_tx_watchdog_handler(struct work_struct *work)
{
struct zd_usb *usb =
container_of(work, struct zd_usb, tx.watchdog_work.work);
struct zd_usb_tx *tx = &usb->tx;
if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
goto out;
if (!zd_tx_timeout(usb))
goto out;
/* TX halted, try reset */
dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device...");
usb_queue_reset_device(usb->intf);
/* reset will stop this worker, don't rearm */
return;
out:
queue_delayed_work(zd_workqueue, &tx->watchdog_work,
ZD_TX_WATCHDOG_INTERVAL);
}
void zd_tx_watchdog_enable(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
if (!tx->watchdog_enabled) {
dev_dbg_f(zd_usb_dev(usb), "\n");
queue_delayed_work(zd_workqueue, &tx->watchdog_work,
ZD_TX_WATCHDOG_INTERVAL);
tx->watchdog_enabled = 1;
}
}
void zd_tx_watchdog_disable(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
if (tx->watchdog_enabled) {
dev_dbg_f(zd_usb_dev(usb), "\n");
tx->watchdog_enabled = 0;
cancel_delayed_work_sync(&tx->watchdog_work);
}
}
static void zd_rx_idle_timer_handler(struct work_struct *work)
{
struct zd_usb *usb =
container_of(work, struct zd_usb, rx.idle_work.work);
struct zd_mac *mac = zd_usb_to_mac(usb);
if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
return;
dev_dbg_f(zd_usb_dev(usb), "\n");
/* 30 seconds since last rx, reset rx */
zd_usb_reset_rx(usb);
}
static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
{
struct zd_usb *usb = (struct zd_usb *)param;
zd_usb_reset_rx_idle_timer(usb);
}
void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
}
static inline void init_usb_interrupt(struct zd_usb *usb)
{
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock_init(&intr->lock);
intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
init_completion(&intr->read_regs.completion);
atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
}
static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
spin_lock_init(&rx->lock);
mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
rx->usb_packet_size = 512;
} else {
rx->usb_packet_size = 64;
}
ZD_ASSERT(rx->fragment_length == 0);
INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
rx->reset_timer_tasklet.data = (unsigned long)usb;
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
spin_lock_init(&tx->lock);
atomic_set(&tx->enabled, 0);
tx->stopped = 0;
skb_queue_head_init(&tx->submitted_skbs);
init_usb_anchor(&tx->submitted);
tx->submitted_urbs = 0;
tx->watchdog_enabled = 0;
INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
}
void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
struct usb_interface *intf)
{
memset(usb, 0, sizeof(*usb));
usb->intf = usb_get_intf(intf);
usb_set_intfdata(usb->intf, hw);
init_usb_anchor(&usb->submitted_cmds);
init_usb_interrupt(usb);
init_usb_tx(usb);
init_usb_rx(usb);
}
void zd_usb_clear(struct zd_usb *usb)
{
usb_set_intfdata(usb->intf, NULL);
usb_put_intf(usb->intf);
ZD_MEMCLEAR(usb, sizeof(*usb));
/* FIXME: usb_interrupt, usb_tx, usb_rx? */
}
static const char *speed(enum usb_device_speed speed)
{
switch (speed) {
case USB_SPEED_LOW:
return "low";
case USB_SPEED_FULL:
return "full";
case USB_SPEED_HIGH:
return "high";
default:
return "unknown speed";
}
}
static int scnprint_id(struct usb_device *udev, char *buffer, size_t size)
{
return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
get_bcdDevice(udev),
speed(udev->speed));
}
int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size)
{
struct usb_device *udev = interface_to_usbdev(usb->intf);
return scnprint_id(udev, buffer, size);
}
#ifdef DEBUG
static void print_id(struct usb_device *udev)
{
char buffer[40];
scnprint_id(udev, buffer, sizeof(buffer));
buffer[sizeof(buffer)-1] = 0;
dev_dbg_f(&udev->dev, "%s\n", buffer);
}
#else
#define print_id(udev) do { } while (0)
#endif
static int eject_installer(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *iface_desc = &intf->altsetting[0];
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
int r;
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;
if (usb_endpoint_dir_out(endpoint) &&
usb_endpoint_xfer_bulk(endpoint)) {
bulk_out_ep = endpoint->bEndpointAddress;
break;
}
}
if (r == -1) {
dev_err(&udev->dev,
"zd1211rw: Could not find bulk out endpoint\n");
return -ENODEV;
}
cmd = kzalloc(31, GFP_KERNEL);
if (cmd == NULL)
return -ENODEV;
/* USB bulk command block */
cmd[0] = 0x55; /* bulk command signature */
cmd[1] = 0x53; /* bulk command signature */
cmd[2] = 0x42; /* bulk command signature */
cmd[3] = 0x43; /* bulk command signature */
cmd[14] = 6; /* command length */
cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
cmd[19] = 0x2; /* eject disc */
dev_info(&udev->dev, "Ejecting virtual installer media...\n");
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
cmd, 31, NULL, 2000);
kfree(cmd);
if (r)
return r;
/* At this point, the device disconnects and reconnects with the real
* ID numbers. */
usb_set_intfdata(intf, NULL);
return 0;
}
int zd_usb_init_hw(struct zd_usb *usb)
{
int r;
struct zd_mac *mac = zd_usb_to_mac(usb);
dev_dbg_f(zd_usb_dev(usb), "\n");
r = upload_firmware(usb);
if (r) {
dev_err(zd_usb_dev(usb),
"couldn't load firmware. Error number %d\n", r);
return r;
}
r = usb_reset_configuration(zd_usb_to_usbdev(usb));
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't reset configuration. Error number %d\n", r);
return r;
}
r = zd_mac_init_hw(mac->hw);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't initialize mac. Error number %d\n", r);
return r;
}
usb->initialized = 1;
return 0;
}
static int probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int r;
struct usb_device *udev = interface_to_usbdev(intf);
struct zd_usb *usb;
struct ieee80211_hw *hw = NULL;
print_id(udev);
if (id->driver_info & DEVICE_INSTALLER)
return eject_installer(intf);
switch (udev->speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
break;
default:
dev_dbg_f(&intf->dev, "Unknown USB speed\n");
r = -ENODEV;
goto error;
}
r = usb_reset_device(udev);
if (r) {
dev_err(&intf->dev,
"couldn't reset usb device. Error number %d\n", r);
goto error;
}
hw = zd_mac_alloc_hw(intf);
if (hw == NULL) {
r = -ENOMEM;
goto error;
}
usb = &zd_hw_mac(hw)->chip.usb;
usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0;
r = zd_mac_preinit_hw(hw);
if (r) {
dev_dbg_f(&intf->dev,
"couldn't initialize mac. Error number %d\n", r);
goto error;
}
r = ieee80211_register_hw(hw);
if (r) {
dev_dbg_f(&intf->dev,
"couldn't register device. Error number %d\n", r);
goto error;
}
dev_dbg_f(&intf->dev, "successful\n");
dev_info(&intf->dev, "%s\n", wiphy_name(hw->wiphy));
return 0;
error:
usb_reset_device(interface_to_usbdev(intf));
if (hw) {
zd_mac_clear(zd_hw_mac(hw));
ieee80211_free_hw(hw);
}
return r;
}
static void disconnect(struct usb_interface *intf)
{
struct ieee80211_hw *hw = zd_intf_to_hw(intf);
struct zd_mac *mac;
struct zd_usb *usb;
/* Either something really bad happened, or we're just dealing with
* a DEVICE_INSTALLER. */
if (hw == NULL)
return;
mac = zd_hw_mac(hw);
usb = &mac->chip.usb;
dev_dbg_f(zd_usb_dev(usb), "\n");
ieee80211_unregister_hw(hw);
/* Just in case something has gone wrong! */
zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
zd_usb_disable_int(usb);
/* If the disconnect has been caused by removal of the driver
* module, the reset allows the driver to be reloaded. If the
* reset is not executed here, the firmware upload in the probe
* function triggered by reloading the driver will fail.
*/
usb_reset_device(interface_to_usbdev(intf));
zd_mac_clear(mac);
ieee80211_free_hw(hw);
dev_dbg(&intf->dev, "disconnected\n");
}
static void zd_usb_resume(struct zd_usb *usb)
{
struct zd_mac *mac = zd_usb_to_mac(usb);
int r;
dev_dbg_f(zd_usb_dev(usb), "\n");
r = zd_op_start(zd_usb_to_hw(usb));
if (r < 0) {
dev_warn(zd_usb_dev(usb), "Device resume failed "
"with error code %d. Retrying...\n", r);
if (usb->was_running)
set_bit(ZD_DEVICE_RUNNING, &mac->flags);
usb_queue_reset_device(usb->intf);
return;
}
if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
r = zd_restore_settings(mac);
if (r < 0) {
dev_dbg(zd_usb_dev(usb),
"failed to restore settings, %d\n", r);
return;
}
}
}
static void zd_usb_stop(struct zd_usb *usb)
{
dev_dbg_f(zd_usb_dev(usb), "\n");
zd_op_stop(zd_usb_to_hw(usb));
zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
zd_usb_disable_int(usb);
usb->initialized = 0;
}
static int pre_reset(struct usb_interface *intf)
{
struct ieee80211_hw *hw = usb_get_intfdata(intf);
struct zd_mac *mac;
struct zd_usb *usb;
if (!hw || intf->condition != USB_INTERFACE_BOUND)
return 0;
mac = zd_hw_mac(hw);
usb = &mac->chip.usb;
usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
zd_usb_stop(usb);
mutex_lock(&mac->chip.mutex);
return 0;
}
static int post_reset(struct usb_interface *intf)
{
struct ieee80211_hw *hw = usb_get_intfdata(intf);
struct zd_mac *mac;
struct zd_usb *usb;
if (!hw || intf->condition != USB_INTERFACE_BOUND)
return 0;
mac = zd_hw_mac(hw);
usb = &mac->chip.usb;
mutex_unlock(&mac->chip.mutex);
if (usb->was_running)
zd_usb_resume(usb);
return 0;
}
static struct usb_driver driver = {
.name = KBUILD_MODNAME,
.id_table = usb_ids,
.probe = probe,
.disconnect = disconnect,
.pre_reset = pre_reset,
.post_reset = post_reset,
.disable_hub_initiated_lpm = 1,
};
struct workqueue_struct *zd_workqueue;
static int __init usb_init(void)
{
int r;
pr_debug("%s usb_init()\n", driver.name);
zd_workqueue = create_singlethread_workqueue(driver.name);
if (zd_workqueue == NULL) {
printk(KERN_ERR "%s couldn't create workqueue\n", driver.name);
return -ENOMEM;
}
r = usb_register(&driver);
if (r) {
destroy_workqueue(zd_workqueue);
printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
driver.name, r);
return r;
}
pr_debug("%s initialized\n", driver.name);
return 0;
}
static void __exit usb_exit(void)
{
pr_debug("%s usb_exit()\n", driver.name);
usb_deregister(&driver);
destroy_workqueue(zd_workqueue);
}
module_init(usb_init);
module_exit(usb_exit);
static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
int *actual_length, int timeout)
{
/* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
* USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
* descriptor.
*/
struct usb_host_endpoint *ep;
unsigned int pipe;
pipe = usb_sndintpipe(udev, EP_REGS_OUT);
ep = usb_pipe_endpoint(udev, pipe);
if (!ep)
return -EINVAL;
if (usb_endpoint_xfer_int(&ep->desc)) {
return usb_interrupt_msg(udev, pipe, data, len,
actual_length, timeout);
} else {
pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
return usb_bulk_msg(udev, pipe, data, len, actual_length,
timeout);
}
}
static int usb_int_regs_length(unsigned int count)
{
return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
}
static void prepare_read_regs_int(struct zd_usb *usb,
struct usb_req_read_regs *req,
unsigned int count)
{
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock_irq(&intr->lock);
atomic_set(&intr->read_regs_enabled, 1);
intr->read_regs.req = req;
intr->read_regs.req_count = count;
INIT_COMPLETION(intr->read_regs.completion);
spin_unlock_irq(&intr->lock);
}
static void disable_read_regs_int(struct zd_usb *usb)
{
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock_irq(&intr->lock);
atomic_set(&intr->read_regs_enabled, 0);
spin_unlock_irq(&intr->lock);
}
static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
unsigned int count)
{
int i;
struct zd_usb_interrupt *intr = &usb->intr;
struct read_regs_int *rr = &intr->read_regs;
struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
/* The created block size seems to be larger than expected.
* However results appear to be correct.
*/
if (rr->length < usb_int_regs_length(count)) {
dev_dbg_f(zd_usb_dev(usb),
"error: actual length %d less than expected %d\n",
rr->length, usb_int_regs_length(count));
return false;
}
if (rr->length > sizeof(rr->buffer)) {
dev_dbg_f(zd_usb_dev(usb),
"error: actual length %d exceeds buffer size %zu\n",
rr->length, sizeof(rr->buffer));
return false;
}
for (i = 0; i < count; i++) {
struct reg_data *rd = &regs->regs[i];
if (rd->addr != req->addr[i]) {
dev_dbg_f(zd_usb_dev(usb),
"rd[%d] addr %#06hx expected %#06hx\n", i,
le16_to_cpu(rd->addr),
le16_to_cpu(req->addr[i]));
return false;
}
}
return true;
}
static int get_results(struct zd_usb *usb, u16 *values,
struct usb_req_read_regs *req, unsigned int count,
bool *retry)
{
int r;
int i;
struct zd_usb_interrupt *intr = &usb->intr;
struct read_regs_int *rr = &intr->read_regs;
struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
spin_lock_irq(&intr->lock);
r = -EIO;
/* Read failed because firmware bug? */
*retry = !!intr->read_regs_int_overridden;
if (*retry)
goto error_unlock;
if (!check_read_regs(usb, req, count)) {
dev_dbg_f(zd_usb_dev(usb), "error: invalid read regs\n");
goto error_unlock;
}
for (i = 0; i < count; i++) {
struct reg_data *rd = &regs->regs[i];
values[i] = le16_to_cpu(rd->value);
}
r = 0;
error_unlock:
spin_unlock_irq(&intr->lock);
return r;
}
int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
const zd_addr_t *addresses, unsigned int count)
{
int r, i, req_len, actual_req_len, try_count = 0;
struct usb_device *udev;
struct usb_req_read_regs *req = NULL;
unsigned long timeout;
bool retry = false;
if (count < 1) {
dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n");
return -EINVAL;
}
if (count > USB_MAX_IOREAD16_COUNT) {
dev_dbg_f(zd_usb_dev(usb),
"error: count %u exceeds possible max %u\n",
count, USB_MAX_IOREAD16_COUNT);
return -EINVAL;
}
if (in_atomic()) {
dev_dbg_f(zd_usb_dev(usb),
"error: io in atomic context not supported\n");
return -EWOULDBLOCK;
}
if (!usb_int_enabled(usb)) {
dev_dbg_f(zd_usb_dev(usb),
"error: usb interrupt not enabled\n");
return -EWOULDBLOCK;
}
ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
sizeof(__le16) > sizeof(usb->req_buf));
BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
sizeof(usb->req_buf));
req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_READ_REGS);
for (i = 0; i < count; i++)
req->addr[i] = cpu_to_le16((u16)addresses[i]);
retry_read:
try_count++;
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb, req, count);
r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
goto error;
}
timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
msecs_to_jiffies(50));
if (!timeout) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
r = -ETIMEDOUT;
goto error;
}
r = get_results(usb, values, req, count, &retry);
if (retry && try_count < 20) {
dev_dbg_f(zd_usb_dev(usb), "read retry, tries so far: %d\n",
try_count);
goto retry_read;
}
error:
return r;
}
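/*
 * Descriptive note added for clarity: the register read above is a two-stage
 * operation.  The request is written to the EP_REGS_OUT endpoint via
 * zd_ep_regs_out_msg(); the reply arrives on the interrupt endpoint, where
 * handle_regs_int() copies it into intr->read_regs and completes
 * read_regs.completion.  The 50 ms completion timeout and the retry loop
 * (up to 20 attempts when the reply was overridden by another interrupt)
 * work around the firmware races described in the comments further up.
 */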
static void iowrite16v_urb_complete(struct urb *urb)
{
struct zd_usb *usb = urb->context;
if (urb->status && !usb->cmd_error)
usb->cmd_error = urb->status;
if (!usb->cmd_error &&
urb->actual_length != urb->transfer_buffer_length)
usb->cmd_error = -EIO;
}
static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
{
int r = 0;
struct urb *urb = usb->urb_async_waiting;
if (!urb)
return 0;
usb->urb_async_waiting = NULL;
if (!last)
urb->transfer_flags |= URB_NO_INTERRUPT;
usb_anchor_urb(urb, &usb->submitted_cmds);
r = usb_submit_urb(urb, GFP_KERNEL);
if (r) {
usb_unanchor_urb(urb);
dev_dbg_f(zd_usb_dev(usb),
"error in usb_submit_urb(). Error number %d\n", r);
goto error;
}
/* fall-through with r == 0 */
error:
usb_free_urb(urb);
return r;
}
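/* Open a batch of asynchronous register writes. Must be called with the
 * chip mutex held and with no other batch in flight.
 */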
void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
{
ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
ZD_ASSERT(usb->urb_async_waiting == NULL);
ZD_ASSERT(!usb->in_async);
ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
usb->in_async = 1;
usb->cmd_error = 0;
usb->urb_async_waiting = NULL;
}
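/* Close the batch: submit the last parked URB, optionally wait for all
 * anchored URBs to complete, and return the first error recorded by the
 * completion handler (a timed-out wait is reported as -ETIMEDOUT).
 */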
int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
{
int r;
ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
ZD_ASSERT(usb->in_async);
/* Submit last iowrite16v URB */
r = zd_submit_waiting_urb(usb, true);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in zd_submit_waiting_usb(). "
"Error number %d\n", r);
usb_kill_anchored_urbs(&usb->submitted_cmds);
goto error;
}
if (timeout)
timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
timeout);
if (!timeout) {
usb_kill_anchored_urbs(&usb->submitted_cmds);
if (usb->cmd_error == -ENOENT) {
dev_dbg_f(zd_usb_dev(usb), "timed out");
r = -ETIMEDOUT;
goto error;
}
}
r = usb->cmd_error;
error:
usb->in_async = 0;
return r;
}
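/* Queue one USB_REQ_WRITE_REGS request as part of the current batch. The
 * URB is not submitted here; it is parked in urb_async_waiting so that
 * URB_NO_INTERRUPT can still be set once the next request (or the batch
 * end) arrives. The previously parked URB, if any, is submitted now.
 */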
int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
unsigned int count)
{
int r;
struct usb_device *udev;
struct usb_req_write_regs *req = NULL;
int i, req_len;
struct urb *urb;
struct usb_host_endpoint *ep;
ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
ZD_ASSERT(usb->in_async);
if (count == 0)
return 0;
if (count > USB_MAX_IOWRITE16_COUNT) {
dev_dbg_f(zd_usb_dev(usb),
"error: count %u exceeds possible max %u\n",
count, USB_MAX_IOWRITE16_COUNT);
return -EINVAL;
}
if (in_atomic()) {
dev_dbg_f(zd_usb_dev(usb),
"error: io in atomic context not supported\n");
return -EWOULDBLOCK;
}
udev = zd_usb_to_usbdev(usb);
ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
if (!ep)
return -ENOENT;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
req_len = sizeof(struct usb_req_write_regs) +
count * sizeof(struct reg_data);
req = kmalloc(req_len, GFP_KERNEL);
if (!req) {
r = -ENOMEM;
goto error;
}
req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
for (i = 0; i < count; i++) {
struct reg_data *rw = &req->reg_writes[i];
rw->addr = cpu_to_le16((u16)ioreqs[i].addr);
rw->value = cpu_to_le16(ioreqs[i].value);
}
/* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
* endpoint is bulk. Select correct type URB by endpoint descriptor.
*/
if (usb_endpoint_xfer_int(&ep->desc))
usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb,
ep->desc.bInterval);
else
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb);
urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
r = zd_submit_waiting_urb(usb, false);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in zd_submit_waiting_usb(). "
"Error number %d\n", r);
goto error;
}
/* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs
* of current batch except for the very last.
*/
usb->urb_async_waiting = urb;
return 0;
error:
usb_free_urb(urb);
return r;
}
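/* Synchronous convenience wrapper: open a batch, queue the single request
 * and wait up to 50 ms for it to complete.
 */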
int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
unsigned int count)
{
int r;
zd_usb_iowrite16v_async_start(usb);
r = zd_usb_iowrite16v_async(usb, ioreqs, count);
if (r) {
zd_usb_iowrite16v_async_end(usb, 0);
return r;
}
return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
}
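/* Bit-bang 'bits' bits of 'value' to the RF chip: read ZD_CR203 to get a
 * control-word template with RF_IF_LE, RF_CLK and RF_DATA cleared, then
 * send one word per bit (MSB first, RF_DATA set for 1-bits) in a single
 * USB_REQ_WRITE_RF request.
 */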
int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
{
int r;
struct usb_device *udev;
struct usb_req_rfwrite *req = NULL;
int i, req_len, actual_req_len;
u16 bit_value_template;
if (in_atomic()) {
dev_dbg_f(zd_usb_dev(usb),
"error: io in atomic context not supported\n");
return -EWOULDBLOCK;
}
if (bits < USB_MIN_RFWRITE_BIT_COUNT) {
dev_dbg_f(zd_usb_dev(usb),
"error: bits %d are smaller than"
" USB_MIN_RFWRITE_BIT_COUNT %d\n",
bits, USB_MIN_RFWRITE_BIT_COUNT);
return -EINVAL;
}
if (bits > USB_MAX_RFWRITE_BIT_COUNT) {
dev_dbg_f(zd_usb_dev(usb),
"error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n",
bits, USB_MAX_RFWRITE_BIT_COUNT);
return -EINVAL;
}
#ifdef DEBUG
if (value & (~0UL << bits)) {
dev_dbg_f(zd_usb_dev(usb),
"error: value %#09x has bits >= %d set\n",
value, bits);
return -EINVAL;
}
#endif /* DEBUG */
dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits);
r = zd_usb_ioread16(usb, &bit_value_template, ZD_CR203);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error %d: Couldn't read ZD_CR203\n", r);
return r;
}
bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
sizeof(usb->req_buf));
BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
sizeof(usb->req_buf));
req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_WRITE_RF);
/* 1: 3683a, but not used in ZYDAS driver */
req->value = cpu_to_le16(2);
req->bits = cpu_to_le16(bits);
for (i = 0; i < bits; i++) {
u16 bv = bit_value_template;
if (value & (1 << (bits-1-i)))
bv |= RF_DATA;
req->bit_values[i] = cpu_to_le16(bv);
}
udev = zd_usb_to_usbdev(usb);
r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
goto out;
}
/* FALL-THROUGH with r == 0 */
out:
return r;
}
| gpl-2.0 |
goodhanrry/G9250_goodhanrry_kernel | drivers/scsi/in2000.c | 2557 | 72895 | /*
* in2000.c - Linux device driver for the
* Always IN2000 ISA SCSI card.
*
* Copyright (c) 1996 John Shifflett, GeoLog Consulting
* john@geolog.com
* jshiffle@netcom.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For the avoidance of doubt the "preferred form" of this code is one which
* is in an open non patent encumbered format. Where cryptographic key signing
* forms part of the process of creating an executable the information
* including keys needed to generate an equivalently functional executable
* are deemed to be part of the source code.
*
* Drew Eckhardt's excellent 'Generic NCR5380' sources provided
* much of the inspiration and some of the code for this driver.
* The Linux IN2000 driver distributed in the Linux kernels through
* version 1.2.13 was an extremely valuable reference on the arcane
* (and still mysterious) workings of the IN2000's fifo. It also
* is where I lifted in2000_biosparam(), the gist of the card
* detection scheme, and other bits of code. Many thanks to the
* talented and courageous people who wrote, contributed to, and
* maintained that driver (including Brad McLean, Shaun Savage,
* Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
* Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
* Youngdale). I should also mention the driver written by
* Hamish Macdonald for the (GASP!) Amiga A2091 card, included
* in the Linux-m68k distribution; it gave me a good initial
* understanding of the proper way to run a WD33c93 chip, and I
* ended up stealing lots of code from it.
*
* _This_ driver is (I feel) an improvement over the old one in
* several respects:
* - All problems relating to the data size of a SCSI request are
* gone (as far as I know). The old driver couldn't handle
* swapping to partitions because that involved 4k blocks, nor
* could it deal with the st.c tape driver unmodified, because
* that usually involved 4k - 32k blocks. The old driver never
* quite got away from a morbid dependence on 2k block sizes -
* which of course is the size of the card's fifo.
*
* - Target Disconnection/Reconnection is now supported. Any
* system with more than one device active on the SCSI bus
* will benefit from this. The driver defaults to what I'm
* calling 'adaptive disconnect' - meaning that each command
* is evaluated individually as to whether or not it should
* be run with the option to disconnect/reselect (if the
* device chooses), or as a "SCSI-bus-hog".
*
* - Synchronous data transfers are now supported. Because there
* are a few devices (and many improperly terminated systems)
* that choke when doing sync, the default is sync DISABLED
* for all devices. This faster protocol can (and should!)
* be enabled on selected devices via the command-line.
*
* - Runtime operating parameters can now be specified through
* either the LILO or the 'insmod' command line. For LILO do:
* "in2000=blah,blah,blah"
* and with insmod go like:
* "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
* The defaults should be good for most people. See the comment
* for 'setup_strings' below for more details.
*
* - The old driver relied exclusively on what the Western Digital
* docs call "Combination Level 2 Commands", which are a great
* idea in that the CPU is relieved of a lot of interrupt
* overhead. However, by accepting a certain (user-settable)
* amount of additional interrupts, this driver achieves
* better control over the SCSI bus, and data transfers are
* almost as fast while being much easier to define, track,
* and debug.
*
* - You can force detection of a card whose BIOS has been disabled.
*
* - Multiple IN2000 cards might almost be supported. I've tried to
* keep it in mind, but have no way to test...
*
*
* TODO:
* tagged queuing. multiple cards.
*
*
* NOTE:
* When using this or any other SCSI driver as a module, you'll
* find that with the stock kernel, at most _two_ SCSI hard
* drives will be linked into the device list (ie, usable).
* If your IN2000 card has more than 2 disks on its bus, you
* might want to change the define of 'SD_EXTRA_DEVS' in the
* 'hosts.h' file from 2 to whatever is appropriate. It took
* me a while to track down this surprisingly obscure and
* undocumented little "feature".
*
*
* People with bug reports, wish-lists, complaints, comments,
* or improvements are asked to pah-leeez email me (John Shifflett)
* at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
* this thing into as good a shape as possible, and I'm positive
* there are lots of lurking bugs and "Stupid Places".
*
* Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Using new_eh handler
* - Hopefully got all the locking right again
* See "FIXME" notes for items that could do with more work
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#define IN2000_VERSION "1.33-2.5"
#define IN2000_DATE "2002/11/03"
#include "in2000.h"
/*
* 'setup_strings' is a single string used to pass operating parameters and
* settings from the kernel/module command-line to the driver. 'setup_args[]'
* is an array of strings that define the compile-time default values for
* these settings. If Linux boots with a LILO or insmod command-line, those
* settings are combined with 'setup_args[]'. Note that LILO command-lines
* are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
* The driver recognizes the following keywords (lower case required) and
* arguments:
*
* - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
* - noreset -No optional args. Prevents SCSI bus reset at boot time.
* - nosync:x -x is a bitmask where the 1st 7 bits correspond with
* the 7 possible SCSI devices (bit 0 for device #0, etc).
* Set a bit to PREVENT sync negotiation on that device.
* The driver default is sync DISABLED on all devices.
* - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
* period. Default is 500; acceptable values are 250 - 1000.
* - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
* x = 1 does 'adaptive' disconnects, which is the default
* and generally the best choice.
* - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
* various types of debug output to printed - see the DB_xxx
* defines in in2000.h
* - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
* determines how the /proc interface works and what it
* does - see the PR_xxx defines in in2000.h
*
* Syntax Notes:
* - Numeric arguments can be decimal or the '0x' form of hex notation. There
* _must_ be a colon between a keyword and its numeric argument, with no
* spaces.
* - Keywords are separated by commas, no spaces, in the standard kernel
* command-line manner.
* - A keyword in the 'nth' comma-separated command-line member will overwrite
* the 'nth' element of setup_args[]. A blank command-line member (in
* other words, a comma with no preceding keyword) will _not_ overwrite
* the corresponding setup_args[] element.
*
* A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
* - in2000=ioport:0x220,noreset
* - in2000=period:250,disconnect:2,nosync:0x03
* - in2000=debug:0x1e
* - in2000=proc:3
*/
/* Normally, no defaults are specified... */
static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
/* filled in by 'insmod' */
static char *setup_strings;
module_param(setup_strings, charp, 0);
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
write1_io(reg_num, IO_WD_ADDR);
return read1_io(IO_WD_DATA);
}
#define READ_AUX_STAT() read1_io(IO_WD_ASR)
static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
{
write1_io(reg_num, IO_WD_ADDR);
write1_io(value, IO_WD_DATA);
}
static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
{
/* while (READ_AUX_STAT() & ASR_CIP)
printk("|");*/
write1_io(WD_COMMAND, IO_WD_ADDR);
write1_io(cmd, IO_WD_DATA);
}
static uchar read_1_byte(struct IN2000_hostdata *hostdata)
{
uchar asr, x = 0;
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
do {
asr = READ_AUX_STAT();
if (asr & ASR_DBR)
x = read_3393(hostdata, WD_DATA);
} while (!(asr & ASR_INT));
return x;
}
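/* The WD33c93 transfer counter is a 24-bit value spread over three
 * registers. The chip auto-increments its register address pointer, so
 * after selecting WD_TRANSFER_COUNT_MSB we can write (or read back) all
 * three bytes, MSB first, in one burst.
 */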
static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
{
write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
write1_io((value >> 16), IO_WD_DATA);
write1_io((value >> 8), IO_WD_DATA);
write1_io(value, IO_WD_DATA);
}
static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
{
unsigned long value;
write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
value = read1_io(IO_WD_DATA) << 16;
value |= read1_io(IO_WD_DATA) << 8;
value |= read1_io(IO_WD_DATA);
return value;
}
/* The 33c93 needs to be told which direction a command transfers its
* data; we use this function to figure it out. Returns true if there
* will be a DATA_OUT phase with this command, false otherwise.
* (Thanks to Joerg Dorchain for the research and suggestion.)
*/
static int is_dir_out(Scsi_Cmnd * cmd)
{
switch (cmd->cmnd[0]) {
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_LONG:
case WRITE_SAME:
case WRITE_BUFFER:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
case COMPARE:
case COPY:
case COPY_VERIFY:
case SEARCH_EQUAL:
case SEARCH_HIGH:
case SEARCH_LOW:
case SEARCH_EQUAL_12:
case SEARCH_HIGH_12:
case SEARCH_LOW_12:
case FORMAT_UNIT:
case REASSIGN_BLOCKS:
case RESERVE:
case MODE_SELECT:
case MODE_SELECT_10:
case LOG_SELECT:
case SEND_DIAGNOSTIC:
case CHANGE_DEFINITION:
case UPDATE_BLOCK:
case SET_WINDOW:
case MEDIUM_SCAN:
case SEND_VOLUME_TAG:
case 0xea:
return 1;
default:
return 0;
}
}
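/* Lookup table mapping a synchronous transfer period (in ns) to the value
 * programmed into the WD33c93 SYNCHRONOUS_TRANSFER register; used by
 * round_period() and calc_sync_xfer() below.
 */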
static struct sx_period sx_table[] = {
{1, 0x20},
{252, 0x20},
{376, 0x30},
{500, 0x40},
{624, 0x50},
{752, 0x60},
{876, 0x70},
{1000, 0x00},
{0, 0}
};
static int round_period(unsigned int period)
{
int x;
for (x = 1; sx_table[x].period_ns; x++) {
if ((period <= sx_table[x].period_ns) && (period > sx_table[x - 1].period_ns)) {
return x;
}
}
return 7;
}
static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
{
uchar result;
period *= 4; /* convert SDTR code to ns */
result = sx_table[round_period(period)].reg_value;
result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
return result;
}
static void in2000_execute(struct Scsi_Host *instance);
static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
Scsi_Cmnd *tmp;
instance = cmd->device->host;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
/* Set up a few fields in the Scsi_Cmnd structure for our own use:
* - host_scribble is the pointer to the next cmd in the input queue
* - scsi_done points to the routine we call when a cmd is finished
* - result is what you'd expect
*/
cmd->host_scribble = NULL;
cmd->scsi_done = done;
cmd->result = 0;
/* We use the Scsi_Pointer structure that's included with each command
* as a scratchpad (as it's intended to be used!). The handy thing about
* the SCp.xxx fields is that they're always associated with a given
* cmd, and are preserved across disconnect-reselect. This means we
* can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
* if we keep all the critical pointers and counters in SCp:
* - SCp.ptr is the pointer into the RAM buffer
* - SCp.this_residual is the size of that buffer
* - SCp.buffer points to the current scatter-gather buffer
* - SCp.buffers_residual tells us how many S.G. buffers there are
* - SCp.have_data_in helps keep track of >2048 byte transfers
* - SCp.sent_command is not used
* - SCp.phase records this command's SRCID_ER bit setting
*/
if (scsi_bufflen(cmd)) {
cmd->SCp.buffer = scsi_sglist(cmd);
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
cmd->SCp.ptr = NULL;
cmd->SCp.this_residual = 0;
}
cmd->SCp.have_data_in = 0;
/* We don't set SCp.phase here - that's done in in2000_execute() */
/* WD docs state that at the conclusion of a "LEVEL2" command, the
* status byte can be retrieved from the LUN register. Apparently,
* this is the case only for *uninterrupted* LEVEL2 commands! If
* there are any unexpected phases entered, even if they are 100%
* legal (different devices may choose to do things differently),
* the LEVEL2 command sequence is exited. This often occurs prior
* to receiving the status byte, in which case the driver does a
* status phase interrupt and gets the status byte on its own.
* While such a command can then be "resumed" (ie restarted to
* finish up as a LEVEL2 command), the LUN register will NOT be
* a valid status byte at the command's conclusion, and we must
* use the byte obtained during the earlier interrupt. Here, we
* preset SCp.Status to an illegal value (0xff) so that when
* this command finally completes, we can tell where the actual
* status byte is stored.
*/
cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
/* We need to disable interrupts before messing with the input
* queue and calling in2000_execute().
*/
/*
* Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
* commands are added to the head of the queue so that the desired
* sense data is not lost before REQUEST_SENSE executes.
*/
if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
cmd->host_scribble = (uchar *) hostdata->input_Q;
hostdata->input_Q = cmd;
} else { /* find the end of the queue */
for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
tmp->host_scribble = (uchar *) cmd;
}
/* We know that there's at least one command in 'input_Q' now.
* Go see if any of them are runnable!
*/
in2000_execute(cmd->device->host);
DB(DB_QUEUE_COMMAND, printk(")Q "))
return 0;
}
static DEF_SCSI_QCMD(in2000_queuecommand)
/*
* This routine attempts to start a scsi command. If the host_card is
* already connected, we give up immediately. Otherwise, look through
* the input_Q, using the first command we find that's intended
* for a currently non-busy target/lun.
* Note that this function is always called with interrupts already
* disabled (either from in2000_queuecommand() or in2000_intr()).
*/
static void in2000_execute(struct Scsi_Host *instance)
{
struct IN2000_hostdata *hostdata;
Scsi_Cmnd *cmd, *prev;
int i;
unsigned short *sp;
unsigned short f;
unsigned short flushbuf[16];
hostdata = (struct IN2000_hostdata *) instance->hostdata;
DB(DB_EXECUTE, printk("EX("))
if (hostdata->selecting || hostdata->connected) {
DB(DB_EXECUTE, printk(")EX-0 "))
return;
}
/*
* Search through the input_Q for a command destined
* for an idle target/lun.
*/
cmd = (Scsi_Cmnd *) hostdata->input_Q;
prev = NULL;
while (cmd) {
if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
break;
prev = cmd;
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
/* quit if queue empty or all possible targets are busy */
if (!cmd) {
DB(DB_EXECUTE, printk(")EX-1 "))
return;
}
/* remove command from queue */
if (prev)
prev->host_scribble = cmd->host_scribble;
else
hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
#ifdef PROC_STATISTICS
hostdata->cmd_cnt[cmd->device->id]++;
#endif
/*
* Start the selection process
*/
if (is_dir_out(cmd))
write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
else
write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
/* Now we need to figure out whether or not this command is a good
* candidate for disconnect/reselect. We guess to the best of our
* ability, based on a set of hierarchical rules. When several
* devices are operating simultaneously, disconnects are usually
* an advantage. In a single device system, or if only 1 device
* is being accessed, transfers usually go faster if disconnects
* are not allowed:
*
* + Commands should NEVER disconnect if hostdata->disconnect =
* DIS_NEVER (this holds for tape drives also), and ALWAYS
* disconnect if hostdata->disconnect = DIS_ALWAYS.
* + Tape drive commands should always be allowed to disconnect.
* + Disconnect should be allowed if disconnected_Q isn't empty.
* + Commands should NOT disconnect if input_Q is empty.
* + Disconnect should be allowed if there are commands in input_Q
* for a different target/lun. In this case, the other commands
* should be made disconnect-able, if not already.
*
* I know, I know - this code would flunk me out of any
* "C Programming 101" class ever offered. But it's easy
* to change around and experiment with for now.
*/
cmd->SCp.phase = 0; /* assume no disconnect */
if (hostdata->disconnect == DIS_NEVER)
goto no;
if (hostdata->disconnect == DIS_ALWAYS)
goto yes;
if (cmd->device->type == 1) /* tape drive? */
goto yes;
if (hostdata->disconnected_Q) /* other commands disconnected? */
goto yes;
if (!(hostdata->input_Q)) /* input_Q empty? */
goto no;
for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
prev->SCp.phase = 1;
goto yes;
}
}
goto no;
yes:
cmd->SCp.phase = 1;
#ifdef PROC_STATISTICS
hostdata->disc_allowed_cnt[cmd->device->id]++;
#endif
no:
write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
/*
* Do a 'Select-With-ATN' command. This will end with
* one of the following interrupts:
* CSR_RESEL_AM: failure - can try again later.
* CSR_TIMEOUT: failure - give up.
* CSR_SELECT: success - proceed.
*/
hostdata->selecting = cmd;
/* Every target has its own synchronous transfer setting, kept in
* the sync_xfer array, and a corresponding status byte in sync_stat[].
* Each target's sync_stat[] entry is initialized to SS_UNSET, and its
* sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
* means that the parameters are undetermined as yet, and that we
* need to send an SDTR message to this device after selection is
* complete. We set SS_FIRST to tell the interrupt routine to do so,
* unless we don't want to even _try_ synchronous transfers: In this
* case we set SS_SET to make the defaults final.
*/
if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
if (hostdata->sync_off & (1 << cmd->device->id))
hostdata->sync_stat[cmd->device->id] = SS_SET;
else
hostdata->sync_stat[cmd->device->id] = SS_FIRST;
}
hostdata->state = S_SELECTING;
write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
}
else {
/*
* Do a 'Select-With-ATN-Xfer' command. This will end with
* one of the following interrupts:
* CSR_RESEL_AM: failure - can try again later.
* CSR_TIMEOUT: failure - give up.
* anything else: success - proceed.
*/
hostdata->connected = cmd;
write_3393(hostdata, WD_COMMAND_PHASE, 0);
/* copy command_descriptor_block into WD chip
* (take advantage of auto-incrementing)
*/
write1_io(WD_CDB_1, IO_WD_ADDR);
for (i = 0; i < cmd->cmd_len; i++)
write1_io(cmd->cmnd[i], IO_WD_DATA);
/* The wd33c93 only knows about Group 0, 1, and 5 commands when
* it's doing a 'select-and-transfer'. To be safe, we write the
* size of the CDB into the OWN_ID register for every case. This
* way there won't be problems with vendor-unique, audio, etc.
*/
write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
/* When doing a non-disconnect command, we can save ourselves a DATA
* phase interrupt later by setting everything up now. With writes we
* need to pre-fill the fifo; if there's room for the 32 flush bytes,
* put them in there too - that'll avoid a fifo interrupt. Reads are
* somewhat simpler.
* KLUDGE NOTE: It seems that you can't completely fill the fifo here:
* This results in the IO_FIFO_COUNT register rolling over to zero,
* and apparently the gate array logic sees this as empty, not full,
* so the 3393 chip is never signalled to start reading from the
* fifo. Or maybe it's seen as a permanent fifo interrupt condition.
* Regardless, we fix this by temporarily pretending that the fifo
* is 16 bytes smaller. (I see now that the old driver has a comment
* about "don't fill completely" in an analogous place - must be the
* same deal.) This results in CDROM, swap partitions, and tape drives
* needing an extra interrupt per write command - I think we can live
* with that!
*/
if (!(cmd->SCp.phase)) {
write_3393_count(hostdata, cmd->SCp.this_residual);
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
if (is_dir_out(cmd)) {
hostdata->fifo = FI_FIFO_WRITING;
if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
i = IN2000_FIFO_SIZE - 16;
cmd->SCp.have_data_in = i; /* this much data in fifo */
i >>= 1; /* Gulp. Assuming modulo 2. */
sp = (unsigned short *) cmd->SCp.ptr;
f = hostdata->io_base + IO_FIFO;
#ifdef FAST_WRITE_IO
FAST_WRITE2_IO();
#else
while (i--)
write2_io(*sp++, IO_FIFO);
#endif
/* Is there room for the flush bytes? */
if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
sp = flushbuf;
i = 16;
#ifdef FAST_WRITE_IO
FAST_WRITE2_IO();
#else
while (i--)
write2_io(0, IO_FIFO);
#endif
}
}
else {
write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
hostdata->fifo = FI_FIFO_READING;
cmd->SCp.have_data_in = 0; /* nothing transferred yet */
}
} else {
write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
}
hostdata->state = S_RUNNING_LEVEL2;
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
}
/*
* Since the SCSI bus can handle only 1 connection at a time,
* we get out of here now. If the selection fails, or when
* the command disconnects, we'll come back to this routine
* to search the input_Q again...
*/
DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
}
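/* Polled transfer of 'cnt' bytes directly through the WD33c93 data
 * register (the fifo is not involved); used for command and outgoing
 * message bytes.
 */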
static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
{
uchar asr;
DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
write_3393_count(hostdata, cnt);
write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
if (data_in_dir) {
do {
asr = READ_AUX_STAT();
if (asr & ASR_DBR)
*buf++ = read_3393(hostdata, WD_DATA);
} while (!(asr & ASR_INT));
} else {
do {
asr = READ_AUX_STAT();
if (asr & ASR_DBR)
write_3393(hostdata, WD_DATA, *buf++);
} while (!(asr & ASR_INT));
}
/* Note: we are returning with the interrupt UN-cleared.
* Since (presumably) an entire I/O operation has
* completed, the bus phase is probably different, and
* the interrupt routine will discover this when it
* responds to the uncleared int.
*/
}
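/* Data-phase transfer through the card's 2k fifo: advance to the next
 * scatter-gather segment if the current one is exhausted, program the
 * WD33c93 transfer counter, and for writes pre-load as much of the buffer
 * as fits. The rest of the data (and all reads) is handled by later
 * fifo/WD33c93 interrupts.
 */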
static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
{
struct IN2000_hostdata *hostdata;
unsigned short *sp;
unsigned short f;
int i;
hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;
/* Normally, you'd expect 'this_residual' to be non-zero here.
* In a series of scatter-gather transfers, however, this
* routine will usually be called with 'this_residual' equal
* to 0 and 'buffers_residual' non-zero. This means that a
* previous transfer completed, clearing 'this_residual', and
* now we need to setup the next scatter-gather buffer as the
* source or destination for THIS transfer.
*/
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
/* Set up hardware registers */
write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
write_3393_count(hostdata, cmd->SCp.this_residual);
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
write1_io(0, IO_FIFO_WRITE); /* zero counter, assume write */
/* Reading is easy. Just issue the command and return - we'll
* get an interrupt later when we have actual data to worry about.
*/
if (data_in_dir) {
write1_io(0, IO_FIFO_READ);
if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
} else
write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
hostdata->fifo = FI_FIFO_READING;
cmd->SCp.have_data_in = 0;
return;
}
/* Writing is more involved - we'll start the WD chip and write as
* much data to the fifo as we can right now. Later interrupts will
* write any bytes that don't make it at this stage.
*/
if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
} else
write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
hostdata->fifo = FI_FIFO_WRITING;
sp = (unsigned short *) cmd->SCp.ptr;
if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
i = IN2000_FIFO_SIZE;
cmd->SCp.have_data_in = i;
i >>= 1; /* Gulp. We assume this_residual is modulo 2 */
f = hostdata->io_base + IO_FIFO;
#ifdef FAST_WRITE_IO
FAST_WRITE2_IO();
#else
while (i--)
write2_io(*sp++, IO_FIFO);
#endif
}
/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
* function in order to work in an SMP environment. (I'd be surprised
* if the driver is ever used by anyone on a real multi-CPU motherboard,
* but it _does_ need to be able to compile and run in an SMP kernel.)
*/
static irqreturn_t in2000_intr(int irqnum, void *dev_id)
{
struct Scsi_Host *instance = dev_id;
struct IN2000_hostdata *hostdata;
Scsi_Cmnd *patch, *cmd;
uchar asr, sr, phs, id, lun, *ucp, msg;
int i, j;
unsigned long length;
unsigned short *sp;
unsigned short f;
unsigned long flags;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
/* Get the spin_lock and disable further ints, for SMP */
spin_lock_irqsave(instance->host_lock, flags);
#ifdef PROC_STATISTICS
hostdata->int_cnt++;
#endif
/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
* WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
* with a big logic array, so it's a little different than what you might
* expect). As far as I know, there's no reason that BOTH can't be active
* at the same time, but there's a problem: while we can read the 3393
* to tell if _it_ wants an interrupt, I don't know of a way to ask the
* fifo the same question. The best we can do is check the 3393 and if
* it _isn't_ the source of the interrupt, then we can be pretty sure
* that the fifo is the culprit.
* UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
* IO_FIFO_COUNT register mirrors the fifo interrupt state. I
* assume that bit clear means interrupt active. As it turns
* out, the driver really doesn't need to check for this after
* all, so my remarks above about a 'problem' can safely be
* ignored. The way the logic is set up, there's no advantage
* (that I can see) to worrying about it.
*
* It seems that the fifo interrupt signal is negated when we extract
* bytes during read or write bytes during write.
* - fifo will interrupt when data is moving from it to the 3393, and
* there are 31 (or less?) bytes left to go. This is sort of short-
* sighted: what if you don't WANT to do more? In any case, our
* response is to push more into the fifo - either actual data or
* dummy bytes if need be. Note that we apparently have to write at
* least 32 additional bytes to the fifo after an interrupt in order
* to get it to release the ones it was holding on to - writing fewer
* than 32 will result in another fifo int.
* UPDATE: Again, info from Bill Earnest makes this more understandable:
* 32 bytes = two counts of the fifo counter register. He tells
* me that the fifo interrupt is a non-latching signal derived
* from a straightforward boolean interpretation of the 7
* highest bits of the fifo counter and the fifo-read/fifo-write
* state. Who'd a thought?
*/
write1_io(0, IO_LED_ON);
asr = READ_AUX_STAT();
if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */
/* Ok. This is definitely a FIFO-only interrupt.
*
* If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
* maybe more to come from the SCSI bus. Read as many as we can out of the
* fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
* update have_data_in afterwards.
*
* If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
* into the WD3393 chip (I think the interrupt happens when there are 31
* bytes left, but it may be fewer...). The 3393 is still waiting, so we
* shove some more into the fifo, which gets things moving again. If the
* original SCSI command specified more than 2048 bytes, there may still
* be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
* Don't forget to update have_data_in. If we've already written out the
* entire buffer, feed 32 dummy bytes to the fifo - they're needed to
* push out the remaining real data.
* (Big thanks to Bill Earnest for getting me out of the mud in here.)
*/
cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
CHECK_NULL(cmd, "fifo_int")
if (hostdata->fifo == FI_FIFO_READING) {
DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))
sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
i = read1_io(IO_FIFO_COUNT) & 0xfe;
i <<= 2; /* # of words waiting in the fifo */
f = hostdata->io_base + IO_FIFO;
#ifdef FAST_READ_IO
FAST_READ2_IO();
#else
while (i--)
*sp++ = read2_io(IO_FIFO);
#endif
i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
i <<= 1;
cmd->SCp.have_data_in += i;
}
else if (hostdata->fifo == FI_FIFO_WRITING) {
DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))
/* If all bytes have been written to the fifo, flush out the stragglers.
* Note that while writing 16 dummy words seems arbitrary, we don't
* have another choice that I can see. What we really want is to read
* the 3393 transfer count register (that would tell us how many bytes
* needed flushing), but the TRANSFER_INFO command hasn't completed
* yet (not enough bytes!) and that register won't be accessible. So,
* we use 16 words - a number obtained through trial and error.
* UPDATE: Bill says this is exactly what Always does, so there.
* More thanks due him for help in this section.
*/
if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
i = 16;
while (i--) /* write 32 dummy bytes */
write2_io(0, IO_FIFO);
}
/* If there are still bytes left in the SCSI buffer, write as many as we
* can out to the fifo.
*/
else {
sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */
j = read1_io(IO_FIFO_COUNT) & 0xfe;
j <<= 2; /* how many words the fifo has room for */
if ((j << 1) > i)
j = (i >> 1);
while (j--)
write2_io(*sp++, IO_FIFO);
i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
i <<= 1;
cmd->SCp.have_data_in += i;
}
}
else {
printk("*** Spurious FIFO interrupt ***");
}
write1_io(0, IO_LED_OFF);
/* release the SMP spin_lock and restore irq state */
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
* may also be asserted, but we don't bother to check it: we get more
* detailed info from FIFO_READING and FIFO_WRITING (see below).
*/
cmd = (Scsi_Cmnd *) hostdata->connected; /* assume we're connected */
sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear the interrupt */
phs = read_3393(hostdata, WD_COMMAND_PHASE);
if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
printk("\nNR:wd-intr-1\n");
write1_io(0, IO_LED_OFF);
/* release the SMP spin_lock and restore irq state */
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
DB(DB_INTR, printk("{%02x:%02x-", asr, sr))
/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
* guaranteed to be in response to the completion of the transfer.
* If we were reading, there's probably data in the fifo that needs
* to be copied into RAM - do that here. Also, we have to update
* 'this_residual' and 'ptr' based on the contents of the
* TRANSFER_COUNT register, in case the device decided to do an
* intermediate disconnect (a device may do this if it has to
* do a seek, or just to be nice and let other devices have
* some bus time during long transfers).
* After doing whatever is necessary with the fifo, we go on and
* service the WD3393 interrupt normally.
*/
if (hostdata->fifo == FI_FIFO_READING) {
/* buffer index = start-of-buffer + #-of-bytes-already-read */
sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
i >>= 1; /* Gulp. We assume this will always be modulo 2 */
f = hostdata->io_base + IO_FIFO;
#ifdef FAST_READ_IO
FAST_READ2_IO();
#else
while (i--)
*sp++ = read2_io(IO_FIFO);
#endif
hostdata->fifo = FI_FIFO_UNUSED;
length = cmd->SCp.this_residual;
cmd->SCp.this_residual = read_3393_count(hostdata);
cmd->SCp.ptr += (length - cmd->SCp.this_residual);
DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
}
else if (hostdata->fifo == FI_FIFO_WRITING) {
hostdata->fifo = FI_FIFO_UNUSED;
length = cmd->SCp.this_residual;
cmd->SCp.this_residual = read_3393_count(hostdata);
cmd->SCp.ptr += (length - cmd->SCp.this_residual);
DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
}
/* Respond to the specific WD3393 interrupt - there are quite a few! */
switch (sr) {
case CSR_TIMEOUT:
DB(DB_INTR, printk("TIMEOUT"))
if (hostdata->state == S_RUNNING_LEVEL2)
hostdata->connected = NULL;
else {
cmd = (Scsi_Cmnd *) hostdata->selecting; /* get a valid cmd */
CHECK_NULL(cmd, "csr_timeout")
hostdata->selecting = NULL;
}
cmd->result = DID_NO_CONNECT << 16;
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->state = S_UNCONNECTED;
cmd->scsi_done(cmd);
/* We are not connected to a target - check to see if there
* are commands waiting to be executed.
*/
in2000_execute(instance);
break;
/* Note: this interrupt should not occur in a LEVEL2 command */
case CSR_SELECT:
DB(DB_INTR, printk("SELECT"))
hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
CHECK_NULL(cmd, "csr_select")
hostdata->selecting = NULL;
/* construct an IDENTIFY message with correct disconnect bit */
hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
if (cmd->SCp.phase)
hostdata->outgoing_msg[0] |= 0x40;
if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
#ifdef SYNC_DEBUG
printk(" sending SDTR ");
#endif
hostdata->sync_stat[cmd->device->id] = SS_WAITING;
/* tack on a 2nd message to ask about synchronous transfers */
hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
hostdata->outgoing_msg[2] = 3;
hostdata->outgoing_msg[3] = EXTENDED_SDTR;
hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
hostdata->outgoing_len = 6;
} else
hostdata->outgoing_len = 1;
hostdata->state = S_CONNECTED;
break;
case CSR_XFER_DONE | PHS_DATA_IN:
case CSR_UNEXP | PHS_DATA_IN:
case CSR_SRV_REQ | PHS_DATA_IN:
DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
transfer_bytes(cmd, DATA_IN_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
break;
case CSR_XFER_DONE | PHS_DATA_OUT:
case CSR_UNEXP | PHS_DATA_OUT:
case CSR_SRV_REQ | PHS_DATA_OUT:
DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
transfer_bytes(cmd, DATA_OUT_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
break;
/* Note: this interrupt should not occur in a LEVEL2 command */
case CSR_XFER_DONE | PHS_COMMAND:
case CSR_UNEXP | PHS_COMMAND:
case CSR_SRV_REQ | PHS_COMMAND:
DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
hostdata->state = S_CONNECTED;
break;
case CSR_XFER_DONE | PHS_STATUS:
case CSR_UNEXP | PHS_STATUS:
case CSR_SRV_REQ | PHS_STATUS:
DB(DB_INTR, printk("STATUS="))
cmd->SCp.Status = read_1_byte(hostdata);
DB(DB_INTR, printk("%02x", cmd->SCp.Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
hostdata->state = S_RUNNING_LEVEL2;
write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
} else {
hostdata->state = S_CONNECTED;
}
break;
case CSR_XFER_DONE | PHS_MESS_IN:
case CSR_UNEXP | PHS_MESS_IN:
case CSR_SRV_REQ | PHS_MESS_IN:
DB(DB_INTR, printk("MSG_IN="))
msg = read_1_byte(hostdata);
sr = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
msg = EXTENDED_MESSAGE;
else
hostdata->incoming_ptr = 0;
cmd->SCp.Message = msg;
switch (msg) {
case COMMAND_COMPLETE:
DB(DB_INTR, printk("CCMP"))
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_PRE_CMP_DISC;
break;
case SAVE_POINTERS:
DB(DB_INTR, printk("SDP"))
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
break;
case RESTORE_POINTERS:
DB(DB_INTR, printk("RDP"))
if (hostdata->level2 >= L2_BASIC) {
write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
} else {
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
}
break;
case DISCONNECT:
DB(DB_INTR, printk("DIS"))
cmd->device->disconnect = 1;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_PRE_TMP_DISC;
break;
case MESSAGE_REJECT:
DB(DB_INTR, printk("REJ"))
#ifdef SYNC_DEBUG
printk("-REJ-");
#endif
if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
hostdata->sync_stat[cmd->device->id] = SS_SET;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
break;
case EXTENDED_MESSAGE:
DB(DB_INTR, printk("EXT"))
ucp = hostdata->incoming_msg;
#ifdef SYNC_DEBUG
printk("%02x", ucp[hostdata->incoming_ptr]);
#endif
/* Is this the last byte of the extended message? */
if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {
switch (ucp[2]) { /* what's the EXTENDED code? */
case EXTENDED_SDTR:
id = calc_sync_xfer(ucp[3], ucp[4]);
if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {
/* A device has sent an unsolicited SDTR message; rather than go
* through the effort of decoding it and then figuring out what
* our reply should be, we're just gonna say that we have a
* synchronous fifo depth of 0. This will result in asynchronous
* transfers - not ideal but so much easier.
* Actually, this is OK because it assures us that if we don't
* specifically ask for sync transfers, we won't do any.
*/
write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
hostdata->outgoing_msg[1] = 3;
hostdata->outgoing_msg[2] = EXTENDED_SDTR;
hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
hostdata->outgoing_msg[4] = 0;
hostdata->outgoing_len = 5;
hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
} else {
hostdata->sync_xfer[cmd->device->id] = id;
}
#ifdef SYNC_DEBUG
printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
#endif
hostdata->sync_stat[cmd->device->id] = SS_SET;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
break;
case EXTENDED_WDTR:
write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
printk("sending WDTR ");
hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
hostdata->outgoing_msg[1] = 2;
hostdata->outgoing_msg[2] = EXTENDED_WDTR;
hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
hostdata->outgoing_len = 4;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
break;
default:
write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
hostdata->outgoing_msg[0] = MESSAGE_REJECT;
hostdata->outgoing_len = 1;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
break;
}
hostdata->incoming_ptr = 0;
}
/* We need to read more MESS_IN bytes for the extended message */
else {
hostdata->incoming_ptr++;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
}
break;
default:
printk("Rejecting Unknown Message(%02x) ", msg);
write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN); /* want MESS_OUT */
hostdata->outgoing_msg[0] = MESSAGE_REJECT;
hostdata->outgoing_len = 1;
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
hostdata->state = S_CONNECTED;
}
break;
/* Note: this interrupt will occur only after a LEVEL2 command */
case CSR_SEL_XFER_DONE:
/* Make sure that reselection is enabled at this point - it may
* have been turned off for the command that just completed.
*/
write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
if (phs == 0x60) {
DB(DB_INTR, printk("SX-DONE"))
cmd->SCp.Message = COMMAND_COMPLETE;
lun = read_3393(hostdata, WD_TARGET_LUN);
DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->state = S_UNCONNECTED;
if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
cmd->SCp.Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
else
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
*/
in2000_execute(instance);
} else {
printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
}
break;
/* Note: this interrupt will occur only after a LEVEL2 command */
case CSR_SDP:
DB(DB_INTR, printk("SDP"))
hostdata->state = S_RUNNING_LEVEL2;
write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
break;
case CSR_XFER_DONE | PHS_MESS_OUT:
case CSR_UNEXP | PHS_MESS_OUT:
case CSR_SRV_REQ | PHS_MESS_OUT:
DB(DB_INTR, printk("MSG_OUT="))
/* To get here, we've probably requested MESSAGE_OUT and have
* already put the correct bytes in outgoing_msg[] and filled
* in outgoing_len. We simply send them out to the SCSI bus.
* Sometimes we get MESSAGE_OUT phase when we're not expecting
* it - like when our SDTR message is rejected by a target. Some
* targets send the REJECT before receiving all of the extended
* message, and then seem to go back to MESSAGE_OUT for a byte
* or two. Not sure why, or if I'm doing something wrong to
* cause this to happen. Regardless, it seems that sending
* NOP messages in these situations results in no harm and
* makes everyone happy.
*/
if (hostdata->outgoing_len == 0) {
hostdata->outgoing_len = 1;
hostdata->outgoing_msg[0] = NOP;
}
transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
hostdata->outgoing_len = 0;
hostdata->state = S_CONNECTED;
break;
case CSR_UNEXP_DISC:
/* I think I've seen this after a request-sense that was in response
* to an error condition, but not sure. We certainly need to do
* something when we get this interrupt - the question is 'what?'.
* Let's think positively, and assume some command has finished
* in a legal manner (like a command that provokes a request-sense),
* so we treat it as a normal command-complete-disconnect.
*/
/* Make sure that reselection is enabled at this point - it may
* have been turned off for the command that just completed.
*/
write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
if (cmd == NULL) {
printk(" - Already disconnected! ");
hostdata->state = S_UNCONNECTED;
/* release the SMP spin_lock and restore irq state */
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
DB(DB_INTR, printk("UNEXP_DISC"))
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->state = S_UNCONNECTED;
if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
else
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
*/
in2000_execute(instance);
break;
case CSR_DISC:
/* Make sure that reselection is enabled at this point - it may
* have been turned off for the command that just completed.
*/
write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
DB(DB_INTR, printk("DISC"))
if (cmd == NULL) {
printk(" - Already disconnected! ");
hostdata->state = S_UNCONNECTED;
}
switch (hostdata->state) {
case S_PRE_CMP_DISC:
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->state = S_UNCONNECTED;
DB(DB_INTR, printk(":%d", cmd->SCp.Status))
if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
else
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
cmd->scsi_done(cmd);
break;
case S_PRE_TMP_DISC:
case S_RUNNING_LEVEL2:
cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
hostdata->disconnected_Q = cmd;
hostdata->connected = NULL;
hostdata->state = S_UNCONNECTED;
#ifdef PROC_STATISTICS
hostdata->disc_done_cnt[cmd->device->id]++;
#endif
break;
default:
printk("*** Unexpected DISCONNECT interrupt! ***");
hostdata->state = S_UNCONNECTED;
}
/* We are no longer connected to a target - check to see if
* there are commands waiting to be executed.
*/
in2000_execute(instance);
break;
case CSR_RESEL_AM:
DB(DB_INTR, printk("RESEL"))
/* First we have to make sure this reselection didn't */
/* happen during Arbitration/Selection of some other device. */
/* If yes, put losing command back on top of input_Q. */
if (hostdata->level2 <= L2_NONE) {
if (hostdata->selecting) {
cmd = (Scsi_Cmnd *) hostdata->selecting;
hostdata->selecting = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
cmd->host_scribble = (uchar *) hostdata->input_Q;
hostdata->input_Q = cmd;
}
}
else {
if (cmd) {
if (phs == 0x00) {
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
cmd->host_scribble = (uchar *) hostdata->input_Q;
hostdata->input_Q = cmd;
} else {
printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
while (1)
printk("\r");
}
}
}
/* OK - find out which device reselected us. */
id = read_3393(hostdata, WD_SOURCE_ID);
id &= SRCID_MASK;
/* and extract the lun from the ID message. (Note that we don't
* bother to check for a valid message here - I guess this is
* not the right way to go, but....)
*/
lun = read_3393(hostdata, WD_DATA);
if (hostdata->level2 < L2_RESELECT)
write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
lun &= 7;
/* Now we look for the command that's reconnecting. */
cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
patch = NULL;
while (cmd) {
if (id == cmd->device->id && lun == cmd->device->lun)
break;
patch = cmd;
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
/* Hmm. Couldn't find a valid command.... What to do? */
if (!cmd) {
printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
break;
}
/* Ok, found the command - now start it up again. */
if (patch)
patch->host_scribble = cmd->host_scribble;
else
hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
hostdata->connected = cmd;
/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
* because these things are preserved over a disconnect.
* But we DO need to fix the DPD bit so it's correct for this command.
*/
if (is_dir_out(cmd))
write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
else
write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
if (hostdata->level2 >= L2_RESELECT) {
write_3393_count(hostdata, 0); /* we want a DATA_PHASE interrupt */
write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
} else
hostdata->state = S_CONNECTED;
break;
default:
printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
}
write1_io(0, IO_LED_OFF);
DB(DB_INTR, printk("} "))
/* release the SMP spin_lock and restore irq state */
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
#define RESET_CARD 0
#define RESET_CARD_AND_BUS 1
#define B_FLAG 0x80
/*
* Caller must hold instance lock!
*/
static int reset_hardware(struct Scsi_Host *instance, int type)
{
struct IN2000_hostdata *hostdata;
int qt, x;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
write1_io(0, IO_LED_ON);
if (type == RESET_CARD_AND_BUS) {
write1_io(0, IO_CARD_RESET);
x = read1_io(IO_HARDWARE);
}
x = read_3393(hostdata, WD_SCSI_STATUS); /* clear any WD intrpt */
write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));
write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
/* FIXME: timeout ?? */
while (!(READ_AUX_STAT() & ASR_INT))
cpu_relax(); /* wait for RESET to complete */
x = read_3393(hostdata, WD_SCSI_STATUS); /* clear interrupt */
write_3393(hostdata, WD_QUEUE_TAG, 0xa5); /* any random number */
qt = read_3393(hostdata, WD_QUEUE_TAG);
if (qt == 0xa5) {
x |= B_FLAG;
write_3393(hostdata, WD_QUEUE_TAG, 0);
}
write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
write1_io(0, IO_LED_OFF);
return x;
}
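/* Error-handler bus reset: reset the card and the SCSI bus, then return
 * every queue and all per-target bookkeeping to power-on defaults.
 */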
static int in2000_bus_reset(Scsi_Cmnd * cmd)
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
int x;
unsigned long flags;
instance = cmd->device->host;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);
spin_lock_irqsave(instance->host_lock, flags);
/* do scsi-reset here */
reset_hardware(instance, RESET_CARD_AND_BUS);
for (x = 0; x < 8; x++) {
hostdata->busy[x] = 0;
hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
}
hostdata->input_Q = NULL;
hostdata->selecting = NULL;
hostdata->connected = NULL;
hostdata->disconnected_Q = NULL;
hostdata->state = S_UNCONNECTED;
hostdata->fifo = FI_FIFO_UNUSED;
hostdata->incoming_ptr = 0;
hostdata->outgoing_len = 0;
cmd->result = DID_RESET << 16;
spin_unlock_irqrestore(instance->host_lock, flags);
return SUCCESS;
}
static int __in2000_abort(Scsi_Cmnd * cmd)
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
Scsi_Cmnd *tmp, *prev;
uchar sr, asr;
unsigned long timeout;
instance = cmd->device->host;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));
/*
* Case 1 : If the command hasn't been issued yet, we simply remove it
* from the input_Q.
*/
tmp = (Scsi_Cmnd *) hostdata->input_Q;
prev = NULL;
while (tmp) {
if (tmp == cmd) {
if (prev)
prev->host_scribble = cmd->host_scribble;
cmd->host_scribble = NULL;
cmd->result = DID_ABORT << 16;
printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
cmd->scsi_done(cmd);
return SUCCESS;
}
prev = tmp;
tmp = (Scsi_Cmnd *) tmp->host_scribble;
}
/*
* Case 2 : If the command is connected, we're going to fail the abort
* and let the high level SCSI driver retry at a later time or
* issue a reset.
*
* Timeouts, and therefore aborted commands, will be highly unlikely
* and handling them cleanly in this situation would make the common
* case of noresets less efficient, and would pollute our code. So,
* we fail.
*/
if (hostdata->connected == cmd) {
printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
printk("sending wd33c93 ABORT command - ");
write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
write_3393_cmd(hostdata, WD_CMD_ABORT);
/* Now we have to attempt to flush out the FIFO... */
printk("flushing fifo - ");
timeout = 1000000;
do {
asr = READ_AUX_STAT();
if (asr & ASR_DBR)
read_3393(hostdata, WD_DATA);
} while (!(asr & ASR_INT) && timeout-- > 0);
sr = read_3393(hostdata, WD_SCSI_STATUS);
printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);
/*
* Abort command processed.
* Still connected.
* We must disconnect.
*/
printk("sending wd33c93 DISCONNECT command - ");
write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
timeout = 1000000;
asr = READ_AUX_STAT();
while ((asr & ASR_CIP) && timeout-- > 0)
asr = READ_AUX_STAT();
sr = read_3393(hostdata, WD_SCSI_STATUS);
printk("asr=%02x, sr=%02x.", asr, sr);
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->connected = NULL;
hostdata->state = S_UNCONNECTED;
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd);
in2000_execute(instance);
return SUCCESS;
}
/*
* Case 3: If the command is currently disconnected from the bus,
* we're not going to expend much effort here: we simply fail the
* abort (return FAILED) and hope for the best...
*/
for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
if (cmd == tmp) {
printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
return FAILED;
}
/*
* Case 4 : If we reached this point, the command was not found in any of
* the queues.
*
* We probably reached this point because of an unlikely race condition
* between the command completing successfully and the abortion code,
* so we won't panic, but we will notify the user in case something really
* broke.
*/
in2000_execute(instance);
printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);
return SUCCESS;
}
static int in2000_abort(Scsi_Cmnd * cmd)
{
int rc;
spin_lock_irq(cmd->device->host->host_lock);
rc = __in2000_abort(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return rc;
}
#define MAX_IN2000_HOSTS 3
#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
#define SETUP_BUFFER_SIZE 200
static char setup_buffer[SETUP_BUFFER_SIZE];
static char setup_used[MAX_SETUP_ARGS];
static int done_setup = 0;
static void __init in2000_setup(char *str, int *ints)
{
int i;
char *p1, *p2;
strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
p1 = setup_buffer;
i = 0;
while (*p1 && (i < MAX_SETUP_ARGS)) {
p2 = strchr(p1, ',');
if (p2) {
*p2 = '\0';
if (p1 != p2)
setup_args[i] = p1;
p1 = p2 + 1;
i++;
} else {
setup_args[i] = p1;
break;
}
}
for (i = 0; i < MAX_SETUP_ARGS; i++)
setup_used[i] = 0;
done_setup = 1;
}
/* check_setup_args() returns the matching argument's position + 1 (always
 * non-zero) if the key is found, 0 if not
 */
static int __init check_setup_args(char *key, int *val, char *buf)
{
int x;
char *cp;
for (x = 0; x < MAX_SETUP_ARGS; x++) {
if (setup_used[x])
continue;
if (!strncmp(setup_args[x], key, strlen(key)))
break;
}
if (x == MAX_SETUP_ARGS)
return 0;
setup_used[x] = 1;
cp = setup_args[x] + strlen(key);
*val = -1;
if (*cp != ':')
return ++x;
cp++;
if ((*cp >= '0') && (*cp <= '9')) {
*val = simple_strtoul(cp, NULL, 0);
}
return ++x;
}
/* The "correct" (ie portable) way to access memory-mapped hardware
* such as the IN2000 EPROM and dip switch is through the use of
* special macros declared in 'asm/io.h'. We use readb() and readl()
* when reading from the card's BIOS area in in2000_detect().
*/
static u32 bios_tab[] in2000__INITDATA = {
0xc8000,
0xd0000,
0xd8000,
0
};
static unsigned short base_tab[] in2000__INITDATA = {
0x220,
0x200,
0x110,
0x100,
};
static int int_tab[] in2000__INITDATA = {
15,
14,
11,
10
};
static int probe_bios(u32 addr, u32 *s1, uchar *switches)
{
void __iomem *p = ioremap(addr, 0x34);
if (!p)
return 0;
*s1 = readl(p + 0x10);
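/* The two magic constants below are the BIOS ID strings as little-endian
 * 32-bit loads: 0x41564f4e reads back as the ASCII bytes "NOVA" and
 * 0x61776c41 as "Alwa" (presumably the start of "Always"), covering the
 * two known EPROM layouts.
 */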
if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
/* Read the switch image that's mapped into EPROM space */
*switches = ~readb(p + 0x20);
iounmap(p);
return 1;
}
iounmap(p);
return 0;
}
static int __init in2000_detect(struct scsi_host_template * tpnt)
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
int detect_count;
int bios;
int x;
unsigned short base;
uchar switches;
uchar hrev;
unsigned long flags;
int val;
char buf[32];
/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
* pretty straightforward and fool-proof operation. There are 3
* possible locations for the IN2000 EPROM in memory space - if we
* find a BIOS signature, we can read the dip switch settings from
* the byte at BIOS+32 (shadowed in by logic on the card). From 2
* of the switch bits we get the card's address in IO space. There's
* an image of the dip switch there, also, so we have a way to back-
* check that this really is an IN2000 card. Very nifty. Use the
* 'ioport:xx' command-line parameter if your BIOS EPROM is absent
* or disabled.
*/
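/* For reference, the command-line override mentioned above comes in through
 * 'setup_strings' and is parsed by in2000_setup()/check_setup_args().  A
 * plausible example (exact boot syntax depends on how the driver is built
 * in): "in2000=ioport:0x220,noreset,period:250".
 */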
if (!done_setup && setup_strings)
in2000_setup(setup_strings, NULL);
detect_count = 0;
for (bios = 0; bios_tab[bios]; bios++) {
u32 s1 = 0;
if (check_setup_args("ioport", &val, buf)) {
base = val;
switches = ~inb(base + IO_SWITCHES) & 0xff;
printk("Forcing IN2000 detection at IOport 0x%x ", base);
bios = 2;
}
/*
* There have been a couple of BIOS versions with different layouts
* for the obvious ID strings. We look for the 2 most common ones and
* hope that they cover all the cases...
*/
else if (probe_bios(bios_tab[bios], &s1, &switches)) {
printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);
/* Find out where the IO space is */
x = switches & (SW_ADDR0 | SW_ADDR1);
base = base_tab[x];
/* Check for the IN2000 signature in IO space. */
x = ~inb(base + IO_SWITCHES) & 0xff;
if (x != switches) {
printk("Bad IO signature: %02x vs %02x.\n", x, switches);
continue;
}
} else
continue;
/* OK. We have a base address for the IO ports - run a few safety checks */
if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */
printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
continue;
}
/* Let's assume any hardware version will work, although the driver
* has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
* print out the rev number for reference later, but accept them all.
*/
hrev = inb(base + IO_HARDWARE);
/* Bit 2 tells us if interrupts are disabled */
if (switches & SW_DISINT) {
printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
printk("is not configured for interrupt operation!\n");
printk("This driver requires an interrupt: cancelling detection.\n");
continue;
}
/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
* initialize it.
*/
tpnt->proc_name = "in2000";
instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
if (instance == NULL)
continue;
detect_count++;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
instance->io_port = hostdata->io_base = base;
hostdata->dip_switch = switches;
hostdata->hrev = hrev;
write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */
write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
write1_io(0, IO_INTR_MASK); /* allow all ints */
x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
printk("in2000_detect: Unable to allocate IRQ.\n");
detect_count--;
continue;
}
instance->irq = x;
instance->n_io_port = 13;
request_region(base, 13, "in2000"); /* lock in this IO space for our use */
for (x = 0; x < 8; x++) {
hostdata->busy[x] = 0;
hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
#ifdef PROC_STATISTICS
hostdata->cmd_cnt[x] = 0;
hostdata->disc_allowed_cnt[x] = 0;
hostdata->disc_done_cnt[x] = 0;
#endif
}
hostdata->input_Q = NULL;
hostdata->selecting = NULL;
hostdata->connected = NULL;
hostdata->disconnected_Q = NULL;
hostdata->state = S_UNCONNECTED;
hostdata->fifo = FI_FIFO_UNUSED;
hostdata->level2 = L2_BASIC;
hostdata->disconnect = DIS_ADAPTIVE;
hostdata->args = DEBUG_DEFAULTS;
hostdata->incoming_ptr = 0;
hostdata->outgoing_len = 0;
hostdata->default_sx_per = DEFAULT_SX_PER;
/* Older BIOS's had a 'sync on/off' switch - use its setting */
if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
hostdata->sync_off = 0x00; /* sync defaults to on */
else
hostdata->sync_off = 0xff; /* sync defaults to off */
#ifdef PROC_INTERFACE
hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
#ifdef PROC_STATISTICS
hostdata->int_cnt = 0;
#endif
#endif
if (check_setup_args("nosync", &val, buf))
hostdata->sync_off = val;
if (check_setup_args("period", &val, buf))
hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;
if (check_setup_args("disconnect", &val, buf)) {
if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
hostdata->disconnect = val;
else
hostdata->disconnect = DIS_ADAPTIVE;
}
if (check_setup_args("noreset", &val, buf))
hostdata->args ^= A_NO_SCSI_RESET;
if (check_setup_args("level2", &val, buf))
hostdata->level2 = val;
if (check_setup_args("debug", &val, buf))
hostdata->args = (val & DB_MASK);
#ifdef PROC_INTERFACE
if (check_setup_args("proc", &val, buf))
hostdata->proc = val;
#endif
/* FIXME: not strictly needed I think but the called code expects
to be locked */
spin_lock_irqsave(instance->host_lock, flags);
x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
spin_unlock_irqrestore(instance->host_lock, flags);
hostdata->microcode = read_3393(hostdata, WD_CDB_1);
if (x & 0x01) {
if (x & B_FLAG)
hostdata->chip = C_WD33C93B;
else
hostdata->chip = C_WD33C93A;
} else
hostdata->chip = C_WD33C93;
printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
#ifdef DEBUGGING_ON
printk("setup_args = ");
for (x = 0; x < MAX_SETUP_ARGS; x++)
printk("%s,", setup_args[x]);
printk("\n");
#endif
if (hostdata->sync_off == 0xff)
printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
}
return detect_count;
}
static int in2000_release(struct Scsi_Host *shost)
{
if (shost->irq)
free_irq(shost->irq, shost);
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
return 0;
}
/* NOTE: I lifted this function straight out of the old driver,
* and have not tested it. Presumably it does what it's
* supposed to do...
*/
static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
{
int size;
size = capacity;
iinfo[0] = 64;
iinfo[1] = 32;
iinfo[2] = size >> 11;
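/* 64 heads * 32 sectors = 2048 (2^11) sectors per cylinder, hence >> 11 */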
/* This should approximate the large drive handling that the DOS ASPI manager
uses. Drives very near the boundaries may not be handled correctly (i.e.
near 2.0 Gb and 4.0 Gb) */
if (iinfo[2] > 1024) {
iinfo[0] = 64;
iinfo[1] = 63;
iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
}
if (iinfo[2] > 1024) {
iinfo[0] = 128;
iinfo[1] = 63;
iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
}
if (iinfo[2] > 1024) {
iinfo[0] = 255;
iinfo[1] = 63;
iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
}
return 0;
}
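/* in2000_write_info() lets the options above be re-tuned at runtime by
 * writing "key:value" strings to the host's procfs entry, e.g. (the path
 * shown is illustrative): echo "disconnect:2" > /proc/scsi/in2000/0
 */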
static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
{
#ifdef PROC_INTERFACE
char *bp;
struct IN2000_hostdata *hd;
int x, i;
hd = (struct IN2000_hostdata *) instance->hostdata;
buf[len] = '\0';
bp = buf;
if (!strncmp(bp, "debug:", 6)) {
bp += 6;
hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
} else if (!strncmp(bp, "disconnect:", 11)) {
bp += 11;
x = simple_strtoul(bp, NULL, 0);
if (x < DIS_NEVER || x > DIS_ALWAYS)
x = DIS_ADAPTIVE;
hd->disconnect = x;
} else if (!strncmp(bp, "period:", 7)) {
bp += 7;
x = simple_strtoul(bp, NULL, 0);
hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
} else if (!strncmp(bp, "resync:", 7)) {
bp += 7;
x = simple_strtoul(bp, NULL, 0);
for (i = 0; i < 7; i++)
if (x & (1 << i))
hd->sync_stat[i] = SS_UNSET;
} else if (!strncmp(bp, "proc:", 5)) {
bp += 5;
hd->proc = simple_strtoul(bp, NULL, 0);
} else if (!strncmp(bp, "level2:", 7)) {
bp += 7;
hd->level2 = simple_strtoul(bp, NULL, 0);
}
#endif
return len;
}
static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
{
#ifdef PROC_INTERFACE
unsigned long flags;
struct IN2000_hostdata *hd;
Scsi_Cmnd *cmd;
int x;
hd = (struct IN2000_hostdata *) instance->hostdata;
spin_lock_irqsave(instance->host_lock, flags);
if (hd->proc & PR_VERSION)
seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
if (hd->proc & PR_INFO) {
seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
seq_printf(m, "\nsync_xfer[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_xfer[x]);
seq_printf(m, "\nsync_stat[] = ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%02x", hd->sync_stat[x]);
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
seq_printf(m, "\ncommands issued: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
seq_printf(m, "\ndisconnects allowed:");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
seq_printf(m, "\ndisconnects done: ");
for (x = 0; x < 7; x++)
seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt);
}
#endif
if (hd->proc & PR_CONNECTED) {
seq_printf(m, "\nconnected: ");
if (hd->connected) {
cmd = (Scsi_Cmnd *) hd->connected;
seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
}
}
if (hd->proc & PR_INPUTQ) {
seq_printf(m, "\ninput_Q: ");
cmd = (Scsi_Cmnd *) hd->input_Q;
while (cmd) {
seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_DISCQ) {
seq_printf(m, "\ndisconnected_Q:");
cmd = (Scsi_Cmnd *) hd->disconnected_Q;
while (cmd) {
seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_TEST) {
; /* insert your own custom function here */
}
seq_printf(m, "\n");
spin_unlock_irqrestore(instance->host_lock, flags);
#endif /* PROC_INTERFACE */
return 0;
}
MODULE_LICENSE("GPL");
static struct scsi_host_template driver_template = {
.proc_name = "in2000",
.write_info = in2000_write_info,
.show_info = in2000_show_info,
.name = "Always IN2000",
.detect = in2000_detect,
.release = in2000_release,
.queuecommand = in2000_queuecommand,
.eh_abort_handler = in2000_abort,
.eh_bus_reset_handler = in2000_bus_reset,
.bios_param = in2000_biosparam,
.can_queue = IN2000_CAN_Q,
.this_id = IN2000_HOST_ID,
.sg_tablesize = IN2000_SG,
.cmd_per_lun = IN2000_CPL,
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
| gpl-2.0 |
holyangel/LGG3 | tools/perf/perf.c | 3837 | 12280 | /*
* perf.c
*
* Performance analysis utility.
*
* This is the main hub from which the sub-commands (perf stat,
* perf top, perf record, perf report, etc.) are started.
*/
#include "builtin.h"
#include "util/exec_cmd.h"
#include "util/cache.h"
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
#include "util/debugfs.h"
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
const char perf_more_info_string[] =
"See 'perf help COMMAND' for more information on a specific command.";
int use_browser = -1;
static int use_pager = -1;
struct pager_config {
const char *cmd;
int val;
};
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
int check_pager_config(const char *cmd)
{
struct pager_config c;
c.cmd = cmd;
c.val = -1;
perf_config(pager_command_config, &c);
return c.val;
}
static int tui_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */
static int check_tui_config(const char *cmd)
{
struct pager_config c;
c.cmd = cmd;
c.val = -1;
perf_config(tui_command_config, &c);
return c.val;
}
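/* Both config hooks above read standard perf config keys ("pager.<cmd>"
 * and "tui.<cmd>").  An illustrative ~/.perfconfig snippet (the values
 * here are just an example):
 *
 *	[pager]
 *		report = false
 *
 * would make "perf report" run without a pager.
 */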
static void commit_pager_choice(void)
{
switch (use_pager) {
case 0:
setenv("PERF_PAGER", "cat", 1);
break;
case 1:
/* setup_pager(); */
break;
default:
break;
}
}
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
while (*argc > 0) {
const char *cmd = (*argv)[0];
if (cmd[0] != '-')
break;
/*
* For legacy reasons, the "version" and "help"
* commands can be written with "--" prepended
* to make them look like flags.
*/
if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
break;
/*
* Check remaining flags.
*/
if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
cmd += strlen(CMD_EXEC_PATH);
if (*cmd == '=')
perf_set_argv_exec_path(cmd + 1);
else {
puts(perf_exec_path());
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
puts(system_path(PERF_HTML_PATH));
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
} else if (!strcmp(cmd, "--no-pager")) {
use_pager = 0;
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--perf-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --perf-dir.\n");
usage(perf_usage_string);
}
setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
handled++;
} else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --work-tree.\n");
usage(perf_usage_string);
}
setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--debugfs-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
debugfs_set_path((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
}
(*argv)++;
(*argc)--;
handled++;
}
return handled;
}
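/* handle_alias() expands "alias.<name>" entries from the perf config before
 * command dispatch.  An illustrative ~/.perfconfig entry:
 *
 *	[alias]
 *		rec99 = record -F 99 -g
 *
 * would make "perf rec99 ./prog" behave like "perf record -F 99 -g ./prog";
 * an alias whose value starts with '!' is run through the shell instead.
 */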
static int handle_alias(int *argcp, const char ***argv)
{
int envchanged = 0, ret = 0, saved_errno = errno;
int count, option_count;
const char **new_argv;
const char *alias_command;
char *alias_string;
alias_command = (*argv)[0];
alias_string = alias_lookup(alias_command);
if (alias_string) {
if (alias_string[0] == '!') {
if (*argcp > 1) {
struct strbuf buf;
strbuf_init(&buf, PATH_MAX);
strbuf_addstr(&buf, alias_string);
sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
free(alias_string);
alias_string = buf.buf;
}
ret = system(alias_string + 1);
if (ret >= 0 && WIFEXITED(ret) &&
WEXITSTATUS(ret) != 127)
exit(WEXITSTATUS(ret));
die("Failed to run '%s' when expanding alias '%s'",
alias_string + 1, alias_command);
}
count = split_cmdline(alias_string, &new_argv);
if (count < 0)
die("Bad alias.%s string", alias_command);
option_count = handle_options(&new_argv, &count, &envchanged);
if (envchanged)
die("alias '%s' changes environment variables\n"
"You can use '!perf' in the alias to do this.",
alias_command);
memmove(new_argv - option_count, new_argv,
count * sizeof(char *));
new_argv -= option_count;
if (count < 1)
die("empty alias for %s", alias_command);
if (!strcmp(alias_command, new_argv[0]))
die("recursive alias: %s", alias_command);
new_argv = realloc(new_argv, sizeof(char *) *
(count + *argcp + 1));
/* insert after command name */
memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
new_argv[count + *argcp] = NULL;
*argv = new_argv;
*argcp += count - 1;
ret = 1;
}
errno = saved_errno;
return ret;
}
const char perf_version_string[] = PERF_VERSION;
#define RUN_SETUP (1<<0)
#define USE_PAGER (1<<1)
/*
* require working tree to be present -- anything using this needs
* RUN_SETUP for reading from the configuration file.
*/
#define NEED_WORK_TREE (1<<2)
struct cmd_struct {
const char *cmd;
int (*fn)(int, const char **, const char *);
int option;
};
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status;
struct stat st;
const char *prefix;
prefix = NULL;
if (p->option & RUN_SETUP)
prefix = NULL; /* setup_perf_directory(); */
if (use_browser == -1)
use_browser = check_tui_config(p->cmd);
if (use_pager == -1 && p->option & RUN_SETUP)
use_pager = check_pager_config(p->cmd);
if (use_pager == -1 && p->option & USE_PAGER)
use_pager = 1;
commit_pager_choice();
status = p->fn(argc, argv, prefix);
exit_browser(status);
if (status)
return status & 0xff;
/* Somebody closed stdout? */
if (fstat(fileno(stdout), &st))
return 0;
/* Ignore write errors for pipes and sockets.. */
if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
return 0;
/* Check for ENOSPC and EIO errors.. */
if (fflush(stdout))
die("write failure on standard output: %s", strerror(errno));
if (ferror(stdout))
die("unknown write failure on standard output");
if (fclose(stdout))
die("close failed on standard output: %s", strerror(errno));
return 0;
}
static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
{ "list", cmd_list, 0 },
{ "record", cmd_record, 0 },
{ "report", cmd_report, 0 },
{ "bench", cmd_bench, 0 },
{ "stat", cmd_stat, 0 },
{ "periodic", cmd_periodic, 0 },
{ "timechart", cmd_timechart, 0 },
{ "top", cmd_top, 0 },
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
{ "script", cmd_script, 0 },
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
{ "inject", cmd_inject, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
if (sizeof(ext) > 1) {
i = strlen(argv[0]) - strlen(ext);
if (i > 0 && !strcmp(argv[0] + i, ext)) {
char *argv0 = strdup(argv[0]);
argv[0] = cmd = argv0;
argv0[i] = '\0';
}
}
/* Turn "perf cmd --help" into "perf help cmd" */
if (argc > 1 && !strcmp(argv[1], "--help")) {
argv[1] = argv[0];
argv[0] = cmd = "help";
}
for (i = 0; i < ARRAY_SIZE(commands); i++) {
struct cmd_struct *p = commands+i;
if (strcmp(p->cmd, cmd))
continue;
exit(run_builtin(p, argc, argv));
}
}
static void execv_dashed_external(const char **argv)
{
struct strbuf cmd = STRBUF_INIT;
const char *tmp;
int status;
strbuf_addf(&cmd, "perf-%s", argv[0]);
/*
* argv[0] must be the perf command, but the argv array
* belongs to the caller, and may be reused in
* subsequent loop iterations. Save argv[0] and
* restore it on error.
*/
tmp = argv[0];
argv[0] = cmd.buf;
/*
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
status = run_command_v_opt(argv, 0);
if (status != -ERR_RUN_COMMAND_EXEC) {
if (IS_RUN_COMMAND_ERR(status))
die("unable to run '%s'", argv[0]);
exit(-status);
}
errno = ENOENT; /* as if we called execvp */
argv[0] = tmp;
strbuf_release(&cmd);
}
static int run_argv(int *argcp, const char ***argv)
{
int done_alias = 0;
while (1) {
/* See if it's an internal command */
handle_internal_command(*argcp, *argv);
/* .. then try the external ones */
execv_dashed_external(*argv);
/* It could be an alias -- this works around the insanity
* of overriding "perf log" with "perf show" by having
* alias.log = show
*/
if (done_alias || !handle_alias(argcp, argv))
break;
done_alias = 1;
}
return done_alias;
}
static void pthread__block_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_BLOCK, &set, NULL);
}
void pthread__unblock_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
int main(int argc, const char **argv)
{
const char *cmd;
cmd = perf_extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
/* get debugfs mount point from /proc/mounts */
debugfs_mount(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
* - cannot take flags in between the "perf" and the "xxxx".
* - cannot execute it externally (since it would just do
* the same thing over again)
*
* So we just directly call the internal command handler, and
* die if that one cannot handle it.
*/
if (!prefixcmp(cmd, "perf-")) {
cmd += 5;
argv[0] = cmd;
handle_internal_command(argc, argv);
die("cannot handle %s internally", cmd);
}
/* Look for flags.. */
argv++;
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
set_buildid_dir();
if (argc > 0) {
if (!prefixcmp(argv[0], "--"))
argv[0] += 2;
} else {
/* The user didn't specify a command; give them help */
printf("\n usage: %s\n\n", perf_usage_string);
list_common_cmds_help();
printf("\n %s\n\n", perf_more_info_string);
exit(1);
}
cmd = argv[0];
/*
* We use PATH to find perf commands, but we prepend some higher
* precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
* environment, and the $(perfexecdir) from the Makefile at build
* time.
*/
setup_path();
/*
* Block SIGWINCH notifications so that the thread that wants it can
* unblock and get syscalls like select interrupted instead of waiting
* forever while the signal goes to some other non interested thread.
*/
pthread__block_sigwinch();
while (1) {
static int done_help;
static int was_alias;
was_alias = run_argv(&argc, &argv);
if (errno != ENOENT)
break;
if (was_alias) {
fprintf(stderr, "Expansion of alias '%s' failed; "
"'%s' is not a perf-command\n",
cmd, argv[0]);
exit(1);
}
if (!done_help) {
cmd = argv[0] = help_unknown_cmd(cmd);
done_help = 1;
} else
break;
}
fprintf(stderr, "Failed to run command '%s': %s\n",
cmd, strerror(errno));
return 1;
}
| gpl-2.0 |