repo_name
string
path
string
copies
string
size
string
content
string
license
string
OpenSEMC/android_kernel_sony_msm8x60
drivers/media/video/s5p-mfc/s5p_mfc_opr.c
5058
43948
/*
 * drivers/media/video/samsung/mfc5/s5p_mfc_opr.c
 *
 * Samsung MFC (Multi Function Codec - FIMV) driver
 * This file contains hw related functions.
 *
 * Kamil Debski, Copyright (c) 2011 Samsung Electronics
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "regs-mfc.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
#include "s5p_mfc_shm.h"
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Convert a bus address into the hw's bank-relative offset representation */
#define OFFSETA(x)  (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
#define OFFSETB(x)  (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)

/*
 * Allocate temporary buffers for decoding.
 * Allocates and zeroes the descriptor buffer in bank 1 and records its
 * bus address in ctx->desc_phys.  Returns 0 or -ENOMEM.
 */
int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
{
    void *desc_virt;
    struct s5p_mfc_dev *dev = ctx->dev;

    ctx->desc_buf = vb2_dma_contig_memops.alloc(
            dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
    /*
     * Fix: the original used IS_ERR_VALUE((int)ctx->desc_buf), which
     * truncates the pointer on 64-bit builds and can miss (or falsely
     * report) an error.  Use IS_ERR(), consistent with the other
     * allocators in this file.
     */
    if (IS_ERR(ctx->desc_buf)) {
        ctx->desc_buf = NULL;
        mfc_err("Allocating DESC buffer failed\n");
        return -ENOMEM;
    }
    ctx->desc_phys = s5p_mfc_mem_cookie(
            dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
    /* The hw requires bank-1 buffers to be aligned to the bank order */
    BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
    desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
    if (desc_virt == NULL) {
        vb2_dma_contig_memops.put(ctx->desc_buf);
        ctx->desc_phys = 0;
        ctx->desc_buf = NULL;
        mfc_err("Remapping DESC buffer failed\n");
        return -ENOMEM;
    }
    /* The firmware expects the descriptor area to start out zeroed */
    memset(desc_virt, 0, DESC_BUF_SIZE);
    wmb();
    return 0;
}

/* Release temporary buffers for decoding */
void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
    if (ctx->desc_phys) {
        vb2_dma_contig_memops.put(ctx->desc_buf);
        ctx->desc_phys = 0;
        ctx->desc_buf = NULL;
    }
}

/*
 * Allocate codec buffers.
 * Computes per-codec scratch sizes for memory banks 1 and 2 and allocates
 * them.  For encoders the reconstruction (reference) frame sizes are
 * derived from the aligned image dimensions.  Returns 0, -EINVAL for an
 * unknown instance type, or -ENOMEM on allocation failure.
 */
int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned int enc_ref_y_size = 0;
    unsigned int enc_ref_c_size = 0;
    unsigned int guard_width, guard_height;

    if (ctx->type == MFCINST_DECODER) {
        mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
              ctx->luma_size, ctx->chroma_size, ctx->mv_size);
        mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
    } else if (ctx->type == MFCINST_ENCODER) {
        enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
            * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
        enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
        if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
            enc_ref_c_size = ALIGN(ctx->img_width,
                           S5P_FIMV_NV12MT_HALIGN)
                * ALIGN(ctx->img_height >> 1,
                    S5P_FIMV_NV12MT_VALIGN);
            enc_ref_c_size = ALIGN(enc_ref_c_size,
                           S5P_FIMV_NV12MT_SALIGN);
        } else {
            /* Non-H264 codecs need a guard band around chroma */
            guard_width = ALIGN(ctx->img_width + 16,
                        S5P_FIMV_NV12MT_HALIGN);
            guard_height = ALIGN((ctx->img_height >> 1) + 4,
                         S5P_FIMV_NV12MT_VALIGN);
            enc_ref_c_size = ALIGN(guard_width * guard_height,
                           S5P_FIMV_NV12MT_SALIGN);
        }
        mfc_debug(2, "recon luma size: %d chroma size: %d\n",
              enc_ref_y_size, enc_ref_c_size);
    } else {
        return -EINVAL;
    }
    /* Codecs have different memory requirements */
    switch (ctx->codec_mode) {
    case S5P_FIMV_CODEC_H264_DEC:
        ctx->bank1_size = ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
                    S5P_FIMV_DEC_VERT_NB_MV_SIZE,
                    S5P_FIMV_DEC_BUF_ALIGN);
        ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
        break;
    case S5P_FIMV_CODEC_MPEG4_DEC:
        ctx->bank1_size = ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
                    S5P_FIMV_DEC_UPNB_MV_SIZE +
                    S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
                    S5P_FIMV_DEC_STX_PARSER_SIZE +
                    S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
                    S5P_FIMV_DEC_BUF_ALIGN);
        ctx->bank2_size = 0;
        break;
    case S5P_FIMV_CODEC_VC1RCV_DEC:
    case S5P_FIMV_CODEC_VC1_DEC:
        ctx->bank1_size = ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
                    S5P_FIMV_DEC_UPNB_MV_SIZE +
                    S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
                    S5P_FIMV_DEC_NB_DCAC_SIZE +
                    3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
                    S5P_FIMV_DEC_BUF_ALIGN);
        ctx->bank2_size = 0;
        break;
    case S5P_FIMV_CODEC_MPEG2_DEC:
        ctx->bank1_size = 0;
        ctx->bank2_size = 0;
        break;
    case S5P_FIMV_CODEC_H263_DEC:
        ctx->bank1_size = ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
                    S5P_FIMV_DEC_UPNB_MV_SIZE +
                    S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
                    S5P_FIMV_DEC_NB_DCAC_SIZE,
                    S5P_FIMV_DEC_BUF_ALIGN);
        ctx->bank2_size = 0;
        break;
    case S5P_FIMV_CODEC_H264_ENC:
        ctx->bank1_size = (enc_ref_y_size * 2) +
                  S5P_FIMV_ENC_UPMV_SIZE +
                  S5P_FIMV_ENC_COLFLG_SIZE +
                  S5P_FIMV_ENC_INTRAMD_SIZE +
                  S5P_FIMV_ENC_NBORINFO_SIZE;
        ctx->bank2_size = (enc_ref_y_size * 2) +
                  (enc_ref_c_size * 4) +
                  S5P_FIMV_ENC_INTRAPRED_SIZE;
        break;
    case S5P_FIMV_CODEC_MPEG4_ENC:
        ctx->bank1_size = (enc_ref_y_size * 2) +
                  S5P_FIMV_ENC_UPMV_SIZE +
                  S5P_FIMV_ENC_COLFLG_SIZE +
                  S5P_FIMV_ENC_ACDCCOEF_SIZE;
        ctx->bank2_size = (enc_ref_y_size * 2) +
                  (enc_ref_c_size * 4);
        break;
    case S5P_FIMV_CODEC_H263_ENC:
        ctx->bank1_size = (enc_ref_y_size * 2) +
                  S5P_FIMV_ENC_UPMV_SIZE +
                  S5P_FIMV_ENC_ACDCCOEF_SIZE;
        ctx->bank2_size = (enc_ref_y_size * 2) +
                  (enc_ref_c_size * 4);
        break;
    default:
        break;
    }
    /* Allocate only if memory from bank 1 is necessary */
    if (ctx->bank1_size > 0) {
        ctx->bank1_buf = vb2_dma_contig_memops.alloc(
            dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
        if (IS_ERR(ctx->bank1_buf)) {
            ctx->bank1_buf = NULL;
            /* Fix: was a raw printk(KERN_ERR ...); use the
             * driver's mfc_err() like every other error path. */
            mfc_err("Buf alloc for decoding failed (port A)\n");
            return -ENOMEM;
        }
        ctx->bank1_phys = s5p_mfc_mem_cookie(
            dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
        BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
    }
    /* Allocate only if memory from bank 2 is necessary */
    if (ctx->bank2_size > 0) {
        ctx->bank2_buf = vb2_dma_contig_memops.alloc(
            dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
        if (IS_ERR(ctx->bank2_buf)) {
            ctx->bank2_buf = NULL;
            mfc_err("Buf alloc for decoding failed (port B)\n");
            return -ENOMEM;
        }
        ctx->bank2_phys = s5p_mfc_mem_cookie(
            dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
        BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
    }
    return 0;
}

/* Release buffers allocated for codec */
void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
{
    if (ctx->bank1_buf) {
        vb2_dma_contig_memops.put(ctx->bank1_buf);
        ctx->bank1_buf = NULL;
        ctx->bank1_phys = 0;
        ctx->bank1_size = 0;
    }
    if (ctx->bank2_buf) {
        vb2_dma_contig_memops.put(ctx->bank2_buf);
        ctx->bank2_buf = NULL;
        ctx->bank2_phys = 0;
        ctx->bank2_size = 0;
    }
}

/*
 * Allocate memory for instance data buffer.
 * H264 instances need a larger context area than the other codecs.
 * Also initializes the shared memory region via s5p_mfc_init_shm().
 */
int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
{
    void *context_virt;
    struct s5p_mfc_dev *dev = ctx->dev;

    if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
        ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
        ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
    else
        ctx->ctx_size = MFC_CTX_BUF_SIZE;
    ctx->ctx_buf = vb2_dma_contig_memops.alloc(
        dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
    if (IS_ERR(ctx->ctx_buf)) {
        mfc_err("Allocating context buffer failed\n");
        ctx->ctx_phys = 0;
        ctx->ctx_buf = NULL;
        return -ENOMEM;
    }
    ctx->ctx_phys = s5p_mfc_mem_cookie(
        dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
    BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
    ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
    context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
    if (context_virt == NULL) {
        mfc_err("Remapping instance buffer failed\n");
        vb2_dma_contig_memops.put(ctx->ctx_buf);
        ctx->ctx_phys = 0;
        ctx->ctx_buf = NULL;
        return -ENOMEM;
    }
    /* Zero content of the allocated memory */
    memset(context_virt, 0, ctx->ctx_size);
    wmb();
    if (s5p_mfc_init_shm(ctx) < 0) {
        vb2_dma_contig_memops.put(ctx->ctx_buf);
        ctx->ctx_phys = 0;
        ctx->ctx_buf = NULL;
        return -ENOMEM;
    }
    return 0;
}

/* Release instance buffer */
void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
{
    if (ctx->ctx_buf) {
        vb2_dma_contig_memops.put(ctx->ctx_buf);
        ctx->ctx_phys = 0;
        ctx->ctx_buf = NULL;
    }
    if (ctx->shm_alloc) {
        vb2_dma_contig_memops.put(ctx->shm_alloc);
        ctx->shm_alloc = NULL;
        ctx->shm = NULL;
    }
}

/* Set registers for decoding temporary buffers */
void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    /* Program the descriptor buffer bank-1 offset and size */
    mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
    mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
}

/* Set registers for shared buffer */
void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
}

/*
 * Set registers for decoding stream buffer.
 * buf_addr is the stream base bus address; start_num_byte is the offset
 * (written to shared memory) at which parsing resumes; buf_size is the
 * size of the data currently in the buffer.
 */
int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
          unsigned int start_num_byte, unsigned int buf_size)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
    mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
    mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
    s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
    return 0;
}

/*
 * Set decoding frame buffer.
 * Carves per-codec scratch areas out of bank 1/2, programs every DPB
 * luma/chroma (and, for H264, motion-vector) address, then issues the
 * INIT_BUFS command to the channel.  Returns -ENOMEM if the banks were
 * sized too small, -EINVAL for an unknown codec.
 */
int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
{
    unsigned int frame_size, i;
    unsigned int frame_size_ch, frame_size_mv;
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned int dpb;
    size_t buf_addr1, buf_addr2;
    /* signed on purpose: going negative detects bank exhaustion below */
    int buf_size1, buf_size2;

    buf_addr1 = ctx->bank1_phys;
    buf_size1 = ctx->bank1_size;
    buf_addr2 = ctx->bank2_phys;
    buf_size2 = ctx->bank2_size;
    /* Update only the DPB count field, preserving the other bits */
    dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
                        ~S5P_FIMV_DPB_COUNT_MASK;
    mfc_write(dev, ctx->total_dpb_count | dpb,
                        S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
    s5p_mfc_set_shared_buffer(ctx);
    switch (ctx->codec_mode) {
    case S5P_FIMV_CODEC_H264_DEC:
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_H264_VERT_NB_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
        buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
        buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
        break;
    case S5P_FIMV_CODEC_MPEG4_DEC:
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_MPEG4_NB_DCAC_ADR);
        buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
        buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_MPEG4_UP_NB_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
        buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
        buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_MPEG4_OT_LINE_ADR);
        buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        break;
    case S5P_FIMV_CODEC_H263_DEC:
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_H263_OT_LINE_ADR);
        buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_H263_UP_NB_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_H263_NB_DCAC_ADR);
        buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
        buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
        break;
    case S5P_FIMV_CODEC_VC1_DEC:
    case S5P_FIMV_CODEC_VC1RCV_DEC:
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
        buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
        buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
        buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
        buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
        /* Bitplanes are programmed in 3-2-1 order */
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_VC1_BITPLANE3_ADR);
        buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_VC1_BITPLANE2_ADR);
        buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                        S5P_FIMV_VC1_BITPLANE1_ADR);
        buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
        break;
    case S5P_FIMV_CODEC_MPEG2_DEC:
        /* MPEG2 needs no per-codec scratch buffers */
        break;
    default:
        mfc_err("Unknown codec for decoding (%x)\n", ctx->codec_mode);
        return -EINVAL;
        break;  /* NOTE(review): unreachable after return */
    }
    frame_size = ctx->luma_size;
    frame_size_ch = ctx->chroma_size;
    frame_size_mv = ctx->mv_size;
    mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size,
                        frame_size_ch, frame_size_mv);
    for (i = 0; i < ctx->total_dpb_count; i++) {
        /* Bank2 */
        /* Luma planes live in bank 2 (OFFSETB), chroma in bank 1 */
        mfc_debug(2, "Luma %d: %x\n", i,
                    ctx->dst_bufs[i].cookie.raw.luma);
        mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
                    S5P_FIMV_DEC_LUMA_ADR + i * 4);
        mfc_debug(2, "\tChroma %d: %x\n", i,
                    ctx->dst_bufs[i].cookie.raw.chroma);
        mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
                    S5P_FIMV_DEC_CHROMA_ADR + i * 4);
        if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
            mfc_debug(2, "\tBuf2: %x, size: %d\n",
                    buf_addr2, buf_size2);
            mfc_write(dev, OFFSETB(buf_addr2),
                    S5P_FIMV_H264_MV_ADR + i * 4);
            buf_addr2 += frame_size_mv;
            buf_size2 -= frame_size_mv;
        }
    }
    mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
    mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
            buf_size1, buf_size2, ctx->total_dpb_count);
    if (buf_size1 < 0 || buf_size2 < 0) {
        mfc_debug(2, "Not enough memory has been allocated\n");
        return -ENOMEM;
    }
    s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
    s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
    if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
        s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
    /* Kick the INIT_BUFS command on this instance's channel */
    mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
                    << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
                    S5P_FIMV_SI_CH0_INST_ID);
    return 0;
}

/* Set registers for encoding stream buffer */
int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
        unsigned long addr, unsigned int size)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
    mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
    return 0;
}

/* Program the current source frame (bank 2) for encoding */
void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
        unsigned long y_addr, unsigned long c_addr)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
    mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
}

/* Read back the bus addresses of the frame the hw just encoded */
void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
        unsigned long *y_addr, unsigned long *c_addr)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
                        << MFC_OFFSET_SHIFT);
    *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
                        << MFC_OFFSET_SHIFT);
}

/*
 * Set encoding ref & codec buffer.
 * Carves reference luma/chroma frames and per-codec scratch areas out of
 * banks 1 and 2 and programs their offsets.  Reference sizes mirror the
 * computation in s5p_mfc_alloc_codec_buffers().
 */
int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    size_t buf_addr1, buf_addr2;
    size_t buf_size1, buf_size2;
    unsigned int enc_ref_y_size, enc_ref_c_size;
    unsigned int guard_width, guard_height;
    int i;

    buf_addr1 = ctx->bank1_phys;
    buf_size1 = ctx->bank1_size;
    buf_addr2 = ctx->bank2_phys;
    buf_size2 = ctx->bank2_size;
    enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
        * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
    enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
    if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
        enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
            * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
        enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
    } else {
        /* Non-H264: chroma reference carries a guard band */
        guard_width = ALIGN(ctx->img_width + 16,
                    S5P_FIMV_NV12MT_HALIGN);
        guard_height = ALIGN((ctx->img_height >> 1) + 4,
                    S5P_FIMV_NV12MT_VALIGN);
        enc_ref_c_size = ALIGN(guard_width * guard_height,
                    S5P_FIMV_NV12MT_SALIGN);
    }
    mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
    switch (ctx->codec_mode) {
    case S5P_FIMV_CODEC_H264_ENC:
        /* Ref frames 0/1 luma in bank 1, ref 2/3 luma in bank 2 */
        for (i = 0; i < 2; i++) {
            mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
            buf_addr1 += enc_ref_y_size;
            buf_size1 -= enc_ref_y_size;
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
            buf_addr2 += enc_ref_y_size;
            buf_size2 -= enc_ref_y_size;
        }
        /* All four chroma references come from bank 2 */
        for (i = 0; i < 4; i++) {
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
            buf_addr2 += enc_ref_c_size;
            buf_size2 -= enc_ref_c_size;
        }
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
        buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
        buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_H264_COZERO_FLAG_ADR);
        buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
        buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_H264_UP_INTRA_MD_ADR);
        buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
        buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
        mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_H264_UP_INTRA_PRED_ADR);
        buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
        buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_H264_NBOR_INFO_ADR);
        buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
        buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
        mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
                buf_size1, buf_size2);
        break;
    case S5P_FIMV_CODEC_MPEG4_ENC:
        for (i = 0; i < 2; i++) {
            mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
            buf_addr1 += enc_ref_y_size;
            buf_size1 -= enc_ref_y_size;
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
            buf_addr2 += enc_ref_y_size;
            buf_size2 -= enc_ref_y_size;
        }
        for (i = 0; i < 4; i++) {
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
            buf_addr2 += enc_ref_c_size;
            buf_size2 -= enc_ref_c_size;
        }
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
        buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
        buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
        buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
        buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_MPEG4_ACDC_COEF_ADR);
        buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
        buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
        mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
                buf_size1, buf_size2);
        break;
    case S5P_FIMV_CODEC_H263_ENC:
        for (i = 0; i < 2; i++) {
            mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
            buf_addr1 += enc_ref_y_size;
            buf_size1 -= enc_ref_y_size;
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
            buf_addr2 += enc_ref_y_size;
            buf_size2 -= enc_ref_y_size;
        }
        for (i = 0; i < 4; i++) {
            mfc_write(dev, OFFSETB(buf_addr2),
                S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
            buf_addr2 += enc_ref_c_size;
            buf_size2 -= enc_ref_c_size;
        }
        mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
        buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
        buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
        mfc_write(dev, OFFSETA(buf_addr1),
                S5P_FIMV_H263_ACDC_COEF_ADR);
        buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
        buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
        mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
                buf_size1, buf_size2);
        break;
    default:
        mfc_err("Unknown codec set for encoding: %d\n",
                ctx->codec_mode);
        return -EINVAL;
    }
    return 0;
}

/*
 * Program the codec-independent encoding parameters: frame size, GOP,
 * multi-slice mode, CIR, source memory layout, padding, and the
 * frame-level rate-control / shared-memory flags.
 */
static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_enc_params *p = &ctx->enc_params;
    unsigned int reg;
    unsigned int shm;

    /* width */
    mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
    /* height */
    mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
    /* pictype : enable, IDR period */
    reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    reg |= (1 << 18);
    reg &= ~(0xFFFF);
    reg |= p->gop_size;
    mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
    /* multi-slice control */
    /* multi-slice MB number or bit size */
    mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
    if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
        mfc_write(dev,
            p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
    } else if (p->slice_mode ==
            V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
        mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
    } else {
        /* Single-slice: clear both limit registers */
        mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
        mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
    }
    /* cyclic intra refresh */
    mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
    /* memory structure cur. frame: 0 = linear NV12M, 3 = tiled NV12MT */
    if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
        mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
    else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
        mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
    /* padding control & value */
    reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
    if (p->pad) {
        /** enable */
        reg |= (1 << 31);
        /** cr value */
        reg &= ~(0xFF << 16);
        reg |= (p->pad_cr << 16);
        /** cb value */
        reg &= ~(0xFF << 8);
        reg |= (p->pad_cb << 8);
        /** y value */
        reg &= ~(0xFF);
        reg |= (p->pad_luma);
    } else {
        /** disable & all value clear */
        reg = 0;
    }
    mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
    /* rate control config. */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
    /** frame-level rate control */
    reg &= ~(0x1 << 9);
    reg |= (p->rc_frame << 9);
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
    /* bit rate */
    if (p->rc_frame)
        mfc_write(dev, p->rc_bitrate, S5P_FIMV_ENC_RC_BIT_RATE);
    else
        mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
    /* reaction coefficient */
    if (p->rc_frame)
        mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
    shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
    /* seq header ctrl */
    shm &= ~(0x1 << 3);
    shm |= (p->seq_hdr_mode << 3);
    /* frame skip mode */
    shm &= ~(0x3 << 1);
    shm |= (p->frame_skip_mode << 1);
    s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
    /* fixed target bit */
    s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
    return 0;
}

/*
 * Program the H.264-specific encoding parameters on top of the common
 * ones set by s5p_mfc_set_enc_params().
 */
static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_enc_params *p = &ctx->enc_params;
    struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
    unsigned int reg;
    unsigned int shm;

    s5p_mfc_set_enc_params(ctx);
    /* pictype : number of B */
    reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    /* num_b_frame - 0 ~ 2 */
    reg &= ~(0x3 << 16);
    reg |= (p->num_b_frame << 16);
    mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    /* profile & level */
    reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
    /* level */
    reg &= ~(0xFF << 8);
    reg |= (p_264->level << 8);
    /* profile - 0 ~ 2 */
    reg &= ~(0x3F);
    reg |= p_264->profile;
    mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
    /* interlace */
    mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
    /* height: per-field height when interlaced */
    if (p->interlace)
        mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
    /* loopfilter ctrl */
    mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
    /* loopfilter alpha offset (sign-magnitude encoding, bit 4 = sign) */
    if (p_264->loop_filter_alpha < 0) {
        reg = 0x10;
        reg |= (0xFF - p_264->loop_filter_alpha) + 1;
    } else {
        reg = 0x00;
        reg |= (p_264->loop_filter_alpha & 0xF);
    }
    mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
    /* loopfilter beta offset */
    if (p_264->loop_filter_beta < 0) {
        reg = 0x10;
        reg |= (0xFF - p_264->loop_filter_beta) + 1;
    } else {
        reg = 0x00;
        reg |= (p_264->loop_filter_beta & 0xF);
    }
    mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
    /* entropy coding mode: 1 = CABAC, 0 = CAVLC */
    if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
        mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
    else
        mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
    /* number of ref. picture */
    reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
    /* num of ref. pictures of P */
    reg &= ~(0x3 << 5);
    reg |= (p_264->num_ref_pic_4p << 5);
    /* max number of ref. pictures */
    reg &= ~(0x1F);
    reg |= p_264->max_ref_pic;
    mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
    /* 8x8 transform enable */
    mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
    /* rate control config. */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
    /* macroblock level rate control */
    reg &= ~(0x1 << 8);
    reg |= (p_264->rc_mb << 8);
    /* frame QP */
    reg &= ~(0x3F);
    reg |= p_264->rc_frame_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
    /* frame rate, in 1/1000 fps units */
    if (p->rc_frame && p->rc_framerate_denom)
        mfc_write(dev, p->rc_framerate_num * 1000
            / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
    else
        mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
    /* max & min value of QP */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
    /* max QP */
    reg &= ~(0x3F << 8);
    reg |= (p_264->rc_max_qp << 8);
    /* min QP */
    reg &= ~(0x3F);
    reg |= p_264->rc_min_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
    /* macroblock adaptive scaling features */
    if (p_264->rc_mb) {
        reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
        /* dark region */
        reg &= ~(0x1 << 3);
        reg |= (p_264->rc_mb_dark << 3);
        /* smooth region */
        reg &= ~(0x1 << 2);
        reg |= (p_264->rc_mb_smooth << 2);
        /* static region */
        reg &= ~(0x1 << 1);
        reg |= (p_264->rc_mb_static << 1);
        /* high activity region */
        reg &= ~(0x1);
        reg |= p_264->rc_mb_activity;
        mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
    }
    /* With all rate control off, fixed P/B-frame QPs come from shm */
    if (!p->rc_frame && !p_264->rc_mb) {
        shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
        shm &= ~(0xFFF);
        shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
        shm |= (p_264->rc_p_frame_qp & 0x3F);
        s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
    }
    /* extended encoder ctrl */
    shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
    /* AR VUI control */
    shm &= ~(0x1 << 15);
    /*
     * NOTE(review): the mask above clears bit 15 but the value is
     * shifted by only 1 — these look inconsistent; confirm the VUI SAR
     * enable bit position against the MFC firmware shared-memory layout.
     */
    shm |= (p_264->vui_sar << 1);
    s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
    if (p_264->vui_sar) {
        /* aspect ration IDC */
        shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
        shm &= ~(0xFF);
        shm |= p_264->vui_sar_idc;
        s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
        if (p_264->vui_sar_idc == 0xFF) {
            /* IDC 0xFF = Extended_SAR: explicit width/height */
            shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
            shm &= ~(0xFFFFFFFF);
            shm |= p_264->vui_ext_sar_width << 16;
            shm |= p_264->vui_ext_sar_height;
            s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
        }
    }
    /* intra picture period for H.264 */
    shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
    /* control */
    shm &= ~(0x1 << 16);
    shm |= (p_264->open_gop << 16);
    /* value */
    if (p_264->open_gop) {
        shm &= ~(0xFFFF);
        shm |= p_264->open_gop_size;
    }
    s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
    /* extended encoder ctrl */
    shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
    /* vbv buffer size */
    if (p->frame_skip_mode ==
            V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
        shm &= ~(0xFFFF << 16);
        shm |= (p_264->cpb_size << 16);
    }
    s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
    return 0;
}

/*
 * Program the MPEG4-specific encoding parameters on top of the common
 * ones set by s5p_mfc_set_enc_params().
 */
static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_enc_params *p = &ctx->enc_params;
    struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
    unsigned int reg;
    unsigned int shm;
    unsigned int framerate;

    s5p_mfc_set_enc_params(ctx);
    /* pictype : number of B */
    reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    /* num_b_frame - 0 ~ 2 */
    reg &= ~(0x3 << 16);
    reg |= (p->num_b_frame << 16);
    mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
    /* profile & level */
    reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
    /* level */
    reg &= ~(0xFF << 8);
    reg |= (p_mpeg4->level << 8);
    /* profile - 0 ~ 2 */
    reg &= ~(0x3F);
    reg |= p_mpeg4->profile;
    mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
    /* quarter_pixel */
    mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
    /* qp: fixed P/B QPs apply only when frame rate control is off */
    if (!p->rc_frame) {
        shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
        shm &= ~(0xFFF);
        shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
        shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
        s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
    }
    /* frame rate */
    if (p->rc_frame) {
        if (p->rc_framerate_denom > 0) {
            framerate = p->rc_framerate_num * 1000
                    / p->rc_framerate_denom;
            mfc_write(dev, framerate,
                    S5P_FIMV_ENC_RC_FRAME_RATE);
            /* VOP timing: bit 31 enables, then num/denom */
            shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
            shm &= ~(0xFFFFFFFF);
            shm |= (1 << 31);
            shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
            shm |= (p->rc_framerate_denom & 0xFFFF);
            s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
        }
    } else {
        mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
    }
    /* rate control config. */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
    /* frame QP */
    reg &= ~(0x3F);
    reg |= p_mpeg4->rc_frame_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
    /* max & min value of QP */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
    /* max QP */
    reg &= ~(0x3F << 8);
    reg |= (p_mpeg4->rc_max_qp << 8);
    /* min QP */
    reg &= ~(0x3F);
    reg |= p_mpeg4->rc_min_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
    /* extended encoder ctrl */
    shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
    /* vbv buffer size */
    if (p->frame_skip_mode ==
            V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
        shm &= ~(0xFFFF << 16);
        shm |= (p->vbv_size << 16);
    }
    s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
    return 0;
}

/*
 * Program the H.263-specific encoding parameters.  H.263 shares the
 * MPEG4 parameter struct (p->codec.mpeg4) but has no B frames.
 */
static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_enc_params *p = &ctx->enc_params;
    struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
    unsigned int reg;
    unsigned int shm;

    s5p_mfc_set_enc_params(ctx);
    /* qp */
    if (!p->rc_frame) {
        shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
        shm &= ~(0xFFF);
        shm |= (p_h263->rc_p_frame_qp & 0x3F);
        s5p_mfc_write_shm(ctx, shm,
            P_B_FRAME_QP);
    }
    /* frame rate, in 1/1000 fps units */
    if (p->rc_frame && p->rc_framerate_denom)
        mfc_write(dev, p->rc_framerate_num * 1000
            / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
    else
        mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
    /* rate control config. */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
    /* frame QP */
    reg &= ~(0x3F);
    reg |= p_h263->rc_frame_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
    /* max & min value of QP */
    reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
    /* max QP */
    reg &= ~(0x3F << 8);
    reg |= (p_h263->rc_max_qp << 8);
    /* min QP */
    reg &= ~(0x3F);
    reg |= p_h263->rc_min_qp;
    mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
    /* extended encoder ctrl */
    shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
    /* vbv buffer size */
    if (p->frame_skip_mode ==
            V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
        shm &= ~(0xFFFF << 16);
        shm |= (p->vbv_size << 16);
    }
    s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
    return 0;
}

/*
 * Initialize decoding: program display-delay/slice-interface options and
 * issue the SEQ_HEADER command so the hw parses the stream header.
 */
int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    s5p_mfc_set_shared_buffer(ctx);
    /* Setup loop filter, for decoding this is only valid for MPEG4 */
    if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
        mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
    else
        mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
    mfc_write(dev,
        ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK)
                    << S5P_FIMV_SLICE_INT_SHIFT) |
        (ctx->display_delay_enable << S5P_FIMV_DDELAY_ENA_SHIFT) |
        ((ctx->display_delay & S5P_FIMV_DDELAY_VAL_MASK)
                    << S5P_FIMV_DDELAY_VAL_SHIFT),
        S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
    mfc_write(dev,
        ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK)
                    << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
        S5P_FIMV_SI_CH0_INST_ID);
    return 0;
}

/* Set or clear the DPB flush flag in the channel configuration */
static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned int dpb;

    if (flush)
        dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
            S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
    else
        dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
            ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
    mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
}

/* Decode a single frame */
int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
                    enum s5p_mfc_decode_arg last_frame)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
    s5p_mfc_set_shared_buffer(ctx);
    s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
    /* Issue different commands to instance basing on whether it
     * is the last frame or not. */
    switch (last_frame) {
    case MFC_DEC_FRAME:
        mfc_write(dev,
            ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK)
                << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
            S5P_FIMV_SI_CH0_INST_ID);
        break;
    case MFC_DEC_LAST_FRAME:
        mfc_write(dev,
            ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK)
                << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
            S5P_FIMV_SI_CH0_INST_ID);
        break;
    case MFC_DEC_RES_CHANGE:
        /* Resolution change: restart with buffer reallocation */
        mfc_write(dev,
            ((S5P_FIMV_CH_FRAME_START_REALLOC & S5P_FIMV_CH_MASK)
                << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
            S5P_FIMV_SI_CH0_INST_ID);
        break;
    }
    mfc_debug(2, "Decoding a usual frame\n");
    return 0;
}

/*
 * Initialize encoding: program the per-codec parameter set and issue
 * the SEQ_HEADER command so the hw produces the stream header.
 */
int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
        s5p_mfc_set_enc_params_h264(ctx);
    else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
        s5p_mfc_set_enc_params_mpeg4(ctx);
    else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
        s5p_mfc_set_enc_params_h263(ctx);
    else {
        mfc_err("Unknown codec for encoding (%x)\n",
            ctx->codec_mode);
        return -EINVAL;
    }
    s5p_mfc_set_shared_buffer(ctx);
    mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
            (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
    return 0;
}

/* Encode a single frame */
int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    /* memory structure cur. frame: 0 = linear NV12M, 3 = tiled NV12MT */
    if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
        mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
    else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
        mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
    s5p_mfc_set_shared_buffer(ctx);
    mfc_write(dev, (S5P_FIMV_CH_FRAME_START << 16 & 0x70000) |
            (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
    return 0;
}

/*
 * Pick the next context with pending work, round-robin after the current
 * one.  Returns the context number or -EAGAIN when nothing is runnable.
 */
static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
{
    unsigned long flags;
    int new_ctx;
    int cnt;

    spin_lock_irqsave(&dev->condlock, flags);
    new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
    cnt = 0;
    while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
        new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
        if (++cnt > MFC_NUM_CONTEXTS) {
            /* No contexts to run */
            spin_unlock_irqrestore(&dev->condlock, flags);
            return -EAGAIN;
        }
    }
    spin_unlock_irqrestore(&dev->condlock, flags);
    return new_ctx;
}

/* Restart decoding after a mid-stream resolution change */
static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
    dev->curr_ctx = ctx->num;
    s5p_mfc_clean_ctx_int_flags(ctx);
    s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
}

/*
 * Feed the next queued source buffer to the hw and start decoding it.
 * A zero-byte buffer signals end of stream and switches the instance to
 * the finishing state.  Returns -EAGAIN when no source buffer is queued.
 */
static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_buf *temp_vb;
    unsigned long flags;
    unsigned int index;

    spin_lock_irqsave(&dev->irqlock, flags);
    /* Frames are being decoded */
    if (list_empty(&ctx->src_queue)) {
        mfc_debug(2, "No src buffers\n");
        spin_unlock_irqrestore(&dev->irqlock, flags);
        return -EAGAIN;
    }
    /* Get the next source buffer */
    temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
    temp_vb->used = 1;
    s5p_mfc_set_dec_stream_buffer(ctx,
        vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
        ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
    spin_unlock_irqrestore(&dev->irqlock, flags);
    index = temp_vb->b->v4l2_buf.index;
    dev->curr_ctx = ctx->num;
    s5p_mfc_clean_ctx_int_flags(ctx);
    if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
        last_frame = MFC_DEC_LAST_FRAME;
        mfc_debug(2, "Setting ctx->state to 
FINISHING\n"); ctx->state = MFCINST_FINISHING; } s5p_mfc_decode_one_frame(ctx, last_frame); return 0; } static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *dst_mb; struct s5p_mfc_buf *src_mb; unsigned long src_y_addr, src_c_addr, dst_addr; unsigned int dst_size; spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { mfc_debug(2, "no src buffers\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } if (list_empty(&ctx->dst_queue)) { mfc_debug(2, "no dst buffers\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EAGAIN; } src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); src_mb->used = 1; src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_mb->used = 1; dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); dst_size = vb2_plane_size(dst_mb->b, 0); s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_encode_one_frame(ctx); return 0; } static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *temp_vb; /* Initializing decoding - parsing header */ spin_lock_irqsave(&dev->irqlock, flags); mfc_debug(2, "Preparing to init decoding\n"); temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_set_dec_desc_buffer(ctx); mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); s5p_mfc_set_dec_stream_buffer(ctx, vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, temp_vb->b->v4l2_planes[0].bytesused); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); 
s5p_mfc_init_decode(ctx); } static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *dst_mb; unsigned long dst_addr; unsigned int dst_size; s5p_mfc_set_enc_ref_buffer(ctx); spin_lock_irqsave(&dev->irqlock, flags); dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0); dst_size = vb2_plane_size(dst_mb->b, 0); s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_init_encode(ctx); } static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *temp_vb; int ret; /* * Header was parsed now starting processing * First set the output frame buffers */ if (ctx->capture_state != QUEUE_BUFS_MMAPED) { mfc_err("It seems that not all destionation buffers were " "mmaped\nMFC requires that all destination are mmaped " "before starting processing\n"); return -EAGAIN; } spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { mfc_err("Header has been deallocated in the middle of" " initialization\n"); spin_unlock_irqrestore(&dev->irqlock, flags); return -EIO; } temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused); s5p_mfc_set_dec_stream_buffer(ctx, vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, temp_vb->b->v4l2_planes[0].bytesused); spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_dec_frame_buffer(ctx); if (ret) { mfc_err("Failed to alloc frame mem\n"); ctx->state = MFCINST_ERROR; } return ret; } /* Try running an operation on hardware */ void s5p_mfc_try_run(struct s5p_mfc_dev *dev) { struct s5p_mfc_ctx *ctx; int new_ctx; unsigned int ret = 0; if (test_bit(0, 
&dev->enter_suspend)) { mfc_debug(1, "Entering suspend so do not schedule any jobs\n"); return; } /* Check whether hardware is not running */ if (test_and_set_bit(0, &dev->hw_lock) != 0) { /* This is perfectly ok, the scheduled ctx should wait */ mfc_debug(1, "Couldn't lock HW\n"); return; } /* Choose the context to run */ new_ctx = s5p_mfc_get_new_ctx(dev); if (new_ctx < 0) { /* No contexts to run */ if (test_and_clear_bit(0, &dev->hw_lock) == 0) { mfc_err("Failed to unlock hardware\n"); return; } mfc_debug(1, "No ctx is scheduled to be run\n"); return; } ctx = dev->ctx[new_ctx]; /* Got context to run in ctx */ /* * Last frame has already been sent to MFC. * Now obtaining frames from MFC buffer */ s5p_mfc_clock_on(); if (ctx->type == MFCINST_DECODER) { s5p_mfc_set_dec_desc_buffer(ctx); switch (ctx->state) { case MFCINST_FINISHING: s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME); break; case MFCINST_RUNNING: ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); break; case MFCINST_INIT: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_open_inst_cmd(ctx); break; case MFCINST_RETURN_INST: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_close_inst_cmd(ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_dec(ctx); break; case MFCINST_HEAD_PARSED: ret = s5p_mfc_run_init_dec_buffers(ctx); mfc_debug(1, "head parsed\n"); break; case MFCINST_RES_CHANGE_INIT: s5p_mfc_run_res_change(ctx); break; case MFCINST_RES_CHANGE_FLUSH: s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); break; case MFCINST_RES_CHANGE_END: mfc_debug(2, "Finished remaining frames after resolution change\n"); ctx->capture_state = QUEUE_FREE; mfc_debug(2, "Will re-init the codec\n"); s5p_mfc_run_init_dec(ctx); break; default: ret = -EAGAIN; } } else if (ctx->type == MFCINST_ENCODER) { switch (ctx->state) { case MFCINST_FINISHING: case MFCINST_RUNNING: ret = s5p_mfc_run_enc_frame(ctx); break; case MFCINST_INIT: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_open_inst_cmd(ctx); break; case MFCINST_RETURN_INST: 
s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_close_inst_cmd(ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; default: ret = -EAGAIN; } } else { mfc_err("Invalid context type: %d\n", ctx->type); ret = -EAGAIN; } if (ret) { /* Free hardware lock */ if (test_and_clear_bit(0, &dev->hw_lock) == 0) mfc_err("Failed to unlock hardware\n"); /* This is in deed imporant, as no operation has been * scheduled, reduce the clock count as no one will * ever do this, because no interrupt related to this try_run * will ever come from hardware. */ s5p_mfc_clock_off(); } } void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq) { struct s5p_mfc_buf *b; int i; while (!list_empty(lh)) { b = list_entry(lh->next, struct s5p_mfc_buf, list); for (i = 0; i < b->b->num_planes; i++) vb2_set_plane_payload(b->b, i, 0); vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR); list_del(&b->list); } }
gpl-2.0
sycolon/android_kernel_lge_g3
arch/arm/mach-vt8500/pwm.c
7874
5740
/* * arch/arm/mach-vt8500/pwm.c * * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/io.h> #include <linux/pwm.h> #include <linux/delay.h> #include <asm/div64.h> #define VT8500_NR_PWMS 4 static DEFINE_MUTEX(pwm_lock); static LIST_HEAD(pwm_list); struct pwm_device { struct list_head node; struct platform_device *pdev; const char *label; void __iomem *regbase; unsigned int use_count; unsigned int pwm_id; }; #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask) { int loops = msecs_to_loops(10); while ((readb(reg) & bitmask) && --loops) cpu_relax(); if (unlikely(!loops)) pr_warning("Waiting for status bits 0x%x to clear timed out\n", bitmask); } int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) { unsigned long long c; unsigned long period_cycles, prescale, pv, dc; if (pwm == NULL || period_ns == 0 || duty_ns > period_ns) return -EINVAL; c = 25000000/2; /* wild guess --- need to implement clocks */ c = c * period_ns; do_div(c, 1000000000); period_cycles = c; if (period_cycles < 1) period_cycles = 1; prescale = (period_cycles - 1) / 4096; pv = period_cycles / (prescale + 1) - 1; if (pv > 4095) pv = 4095; if (prescale > 1023) return -EINVAL; c = (unsigned long long)pv * duty_ns; do_div(c, period_ns); dc = c; pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 1)); writel(prescale, 
pwm->regbase + 0x4 + (pwm->pwm_id << 4)); pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 2)); writel(pv, pwm->regbase + 0x8 + (pwm->pwm_id << 4)); pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 3)); writel(dc, pwm->regbase + 0xc + (pwm->pwm_id << 4)); return 0; } EXPORT_SYMBOL(pwm_config); int pwm_enable(struct pwm_device *pwm) { pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0)); writel(5, pwm->regbase + (pwm->pwm_id << 4)); return 0; } EXPORT_SYMBOL(pwm_enable); void pwm_disable(struct pwm_device *pwm) { pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0)); writel(0, pwm->regbase + (pwm->pwm_id << 4)); } EXPORT_SYMBOL(pwm_disable); struct pwm_device *pwm_request(int pwm_id, const char *label) { struct pwm_device *pwm; int found = 0; mutex_lock(&pwm_lock); list_for_each_entry(pwm, &pwm_list, node) { if (pwm->pwm_id == pwm_id) { found = 1; break; } } if (found) { if (pwm->use_count == 0) { pwm->use_count++; pwm->label = label; } else { pwm = ERR_PTR(-EBUSY); } } else { pwm = ERR_PTR(-ENOENT); } mutex_unlock(&pwm_lock); return pwm; } EXPORT_SYMBOL(pwm_request); void pwm_free(struct pwm_device *pwm) { mutex_lock(&pwm_lock); if (pwm->use_count) { pwm->use_count--; pwm->label = NULL; } else { pr_warning("PWM device already freed\n"); } mutex_unlock(&pwm_lock); } EXPORT_SYMBOL(pwm_free); static inline void __add_pwm(struct pwm_device *pwm) { mutex_lock(&pwm_lock); list_add_tail(&pwm->node, &pwm_list); mutex_unlock(&pwm_lock); } static int __devinit pwm_probe(struct platform_device *pdev) { struct pwm_device *pwms; struct resource *r; int ret = 0; int i; pwms = kzalloc(sizeof(struct pwm_device) * VT8500_NR_PWMS, GFP_KERNEL); if (pwms == NULL) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } for (i = 0; i < VT8500_NR_PWMS; i++) { pwms[i].use_count = 0; pwms[i].pwm_id = i; pwms[i].pdev = pdev; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); ret = 
-ENODEV; goto err_free; } r = request_mem_region(r->start, resource_size(r), pdev->name); if (r == NULL) { dev_err(&pdev->dev, "failed to request memory resource\n"); ret = -EBUSY; goto err_free; } pwms[0].regbase = ioremap(r->start, resource_size(r)); if (pwms[0].regbase == NULL) { dev_err(&pdev->dev, "failed to ioremap() registers\n"); ret = -ENODEV; goto err_free_mem; } for (i = 1; i < VT8500_NR_PWMS; i++) pwms[i].regbase = pwms[0].regbase; for (i = 0; i < VT8500_NR_PWMS; i++) __add_pwm(&pwms[i]); platform_set_drvdata(pdev, pwms); return 0; err_free_mem: release_mem_region(r->start, resource_size(r)); err_free: kfree(pwms); return ret; } static int __devexit pwm_remove(struct platform_device *pdev) { struct pwm_device *pwms; struct resource *r; int i; pwms = platform_get_drvdata(pdev); if (pwms == NULL) return -ENODEV; mutex_lock(&pwm_lock); for (i = 0; i < VT8500_NR_PWMS; i++) list_del(&pwms[i].node); mutex_unlock(&pwm_lock); iounmap(pwms[0].regbase); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, resource_size(r)); kfree(pwms); return 0; } static struct platform_driver pwm_driver = { .driver = { .name = "vt8500-pwm", .owner = THIS_MODULE, }, .probe = pwm_probe, .remove = __devexit_p(pwm_remove), }; static int __init pwm_init(void) { return platform_driver_register(&pwm_driver); } arch_initcall(pwm_init); static void __exit pwm_exit(void) { platform_driver_unregister(&pwm_driver); } module_exit(pwm_exit); MODULE_LICENSE("GPL");
gpl-2.0
flar2/m8-GPE-5.0.1
drivers/scsi/arm/eesox.c
8130
17113
/* * linux/drivers/acorn/scsi/eesox.c * * Copyright (C) 1997-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver is based on experimentation. Hence, it may have made * assumptions about the particular card that I have available, and * may not be reliable! * * Changelog: * 01-10-1997 RMK Created, READONLY version * 15-02-1998 RMK READ/WRITE version * added DMA support and hardware definitions * 14-03-1998 RMK Updated DMA support * Added terminator control * 15-04-1998 RMK Only do PIO if FAS216 will allow it. * 27-06-1998 RMK Changed asm/delay.h to linux/delay.h * 02-04-2000 RMK 0.0.3 Fixed NO_IRQ/NO_DMA problem, updated for new * error handling code. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <asm/pgtable.h> #include "../scsi.h" #include <scsi/scsi_host.h> #include "fas216.h" #include "scsi.h" #include <scsi/scsicam.h> #define EESOX_FAS216_OFFSET 0x3000 #define EESOX_FAS216_SHIFT 5 #define EESOX_DMASTAT 0x2800 #define EESOX_STAT_INTR 0x01 #define EESOX_STAT_DMA 0x02 #define EESOX_CONTROL 0x2800 #define EESOX_INTR_ENABLE 0x04 #define EESOX_TERM_ENABLE 0x02 #define EESOX_RESET 0x01 #define EESOX_DMADATA 0x3800 #define VERSION "1.10 (17/01/2003 2.5.59)" /* * Use term=0,1,0,0,0 to turn terminators on/off */ static int term[MAX_ECARDS] = { 1, 1, 1, 1, 1, 1, 1, 1 }; #define NR_SG 256 struct eesoxscsi_info { FAS216_Info info; struct expansion_card *ec; void __iomem *base; void __iomem *ctl_port; unsigned int control; struct scatterlist sg[NR_SG]; /* Scatter DMA list */ }; /* Prototype: void eesoxscsi_irqenable(ec, 
irqnr) * Purpose : Enable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqenable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control |= EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } /* Prototype: void eesoxscsi_irqdisable(ec, irqnr) * Purpose : Disable interrupts on EESOX SCSI card * Params : ec - expansion card structure * : irqnr - interrupt number */ static void eesoxscsi_irqdisable(struct expansion_card *ec, int irqnr) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)ec->irq_data; info->control &= ~EESOX_INTR_ENABLE; writeb(info->control, info->ctl_port); } static const expansioncard_ops_t eesoxscsi_ops = { .irqenable = eesoxscsi_irqenable, .irqdisable = eesoxscsi_irqdisable, }; /* Prototype: void eesoxscsi_terminator_ctl(*host, on_off) * Purpose : Turn the EESOX SCSI terminators on or off * Params : host - card to turn on/off * : on_off - !0 to turn on, 0 to turn off */ static void eesoxscsi_terminator_ctl(struct Scsi_Host *host, int on_off) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); if (on_off) info->control |= EESOX_TERM_ENABLE; else info->control &= ~EESOX_TERM_ENABLE; writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } /* Prototype: void eesoxscsi_intr(irq, *dev_id, *regs) * Purpose : handle interrupts from EESOX SCSI card * Params : irq - interrupt number * dev_id - user-defined (Scsi_Host structure) */ static irqreturn_t eesoxscsi_intr(int irq, void *dev_id) { struct eesoxscsi_info *info = dev_id; return fas216_intr(&info->info); } /* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type) * Purpose : initialises DMA/PIO * Params : host - host * SCpnt - command * direction - DMA on to/off of card * min_type - minimum DMA support that 
we must have for this transfer * Returns : type of transfer to be performed */ static fasdmatype_t eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t direction, fasdmatype_t min_type) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; struct device *dev = scsi_get_device(host); int dmach = info->info.scsi.dma; if (dmach != NO_DMA && (min_type == fasdma_real_all || SCp->this_residual >= 512)) { int bufs, map_dir, dma_dir; bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG); if (direction == DMA_OUT) map_dir = DMA_TO_DEVICE, dma_dir = DMA_MODE_WRITE; else map_dir = DMA_FROM_DEVICE, dma_dir = DMA_MODE_READ; dma_map_sg(dev, info->sg, bufs, map_dir); disable_dma(dmach); set_dma_sg(dmach, info->sg, bufs); set_dma_mode(dmach, dma_dir); enable_dma(dmach); return fasdma_real_all; } /* * We don't do DMA, we only do slow PIO * * Some day, we will do Pseudo DMA */ return fasdma_pseudo; } static void eesoxscsi_buffer_in(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; register const unsigned long mask = 0xffff; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; if (status > length) status = length; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; l2 = readl(reg_dmadata) & mask; l2 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; *(u32 *)buf = l2; buf += 4; length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = readl(reg_dmadata) & mask; l1 |= readl(reg_dmadata) << 16; *(u32 *)buf = l1; buf += 4; length -= 4; continue; } if (status >= 2) { *(u16 *)buf = readl(reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_buffer_out(void *buf, int length, void __iomem *base) { const void __iomem *reg_fas = base + EESOX_FAS216_OFFSET; const void __iomem *reg_dmastat = base + EESOX_DMASTAT; const void __iomem *reg_dmadata = base + EESOX_DMADATA; do { unsigned int status; /* * Interrupt request? */ status = readb(reg_fas + (REG_STAT << EESOX_FAS216_SHIFT)); if (status & STAT_INT) break; /* * DMA request active? */ status = readb(reg_dmastat); if (!(status & EESOX_STAT_DMA)) continue; /* * Get number of bytes in FIFO */ status = readb(reg_fas + (REG_CFIS << EESOX_FAS216_SHIFT)) & CFIS_CF; if (status > 16) status = 16; status = 16 - status; if (status > length) status = length; status &= ~1; /* * Align buffer. 
*/ if (((u32)buf) & 2 && status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; status -= 2; length -= 2; } if (status >= 8) { unsigned long l1, l2; l1 = *(u32 *)buf; buf += 4; l2 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); writel(l2 << 16, reg_dmadata); writel(l2, reg_dmadata); length -= 8; continue; } if (status >= 4) { unsigned long l1; l1 = *(u32 *)buf; buf += 4; writel(l1 << 16, reg_dmadata); writel(l1, reg_dmadata); length -= 4; continue; } if (status >= 2) { writel(*(u16 *)buf << 16, reg_dmadata); buf += 2; length -= 2; } } while (length); } static void eesoxscsi_dma_pseudo(struct Scsi_Host *host, struct scsi_pointer *SCp, fasdmadir_t dir, int transfer_size) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (dir == DMA_IN) { eesoxscsi_buffer_in(SCp->ptr, SCp->this_residual, info->base); } else { eesoxscsi_buffer_out(SCp->ptr, SCp->this_residual, info->base); } } /* Prototype: int eesoxscsi_dma_stop(host, SCpnt) * Purpose : stops DMA/PIO * Params : host - host * SCpnt - command */ static void eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; if (info->info.scsi.dma != NO_DMA) disable_dma(info->info.scsi.dma); } /* Prototype: const char *eesoxscsi_info(struct Scsi_Host * host) * Purpose : returns a descriptive string about this interface, * Params : host - driver host structure to return info for. * Returns : pointer to a static buffer containing null terminated string. */ const char *eesoxscsi_info(struct Scsi_Host *host) { struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; static char string[150]; sprintf(string, "%s (%s) in slot %d v%s terminators o%s", host->hostt->name, info->info.scsi.type, info->ec->slot_no, VERSION, info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); return string; } /* Prototype: int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * Purpose : Set a driver specific function * Params : host - host to setup * : buffer - buffer containing string describing operation * : length - length of string * Returns : -EINVAL, or 0 */ static int eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) { int ret = length; if (length >= 9 && strncmp(buffer, "EESOXSCSI", 9) == 0) { buffer += 9; length -= 9; if (length >= 5 && strncmp(buffer, "term=", 5) == 0) { if (buffer[5] == '1') eesoxscsi_terminator_ctl(host, 1); else if (buffer[5] == '0') eesoxscsi_terminator_ctl(host, 0); else ret = -EINVAL; } else ret = -EINVAL; } else ret = -EINVAL; return ret; } /* Prototype: int eesoxscsi_proc_info(char *buffer, char **start, off_t offset, * int length, int host_no, int inout) * Purpose : Return information about the driver to a user process accessing * the /proc filesystem. * Params : buffer - a buffer to write information to * start - a pointer into this buffer set by this routine to the start * of the required information. * offset - offset into information that we have read up to. * length - length of buffer * host_no - host number to return information for * inout - 0 for reading, 1 for writing. * Returns : length of data written to buffer. */ int eesoxscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { struct eesoxscsi_info *info; char *p = buffer; int pos; if (inout == 1) return eesoxscsi_set_proc_info(host, buffer, length); info = (struct eesoxscsi_info *)host->hostdata; p += sprintf(p, "EESOX SCSI driver v%s\n", VERSION); p += fas216_print_host(&info->info, p); p += sprintf(p, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); p += fas216_print_stats(&info->info, p); p += fas216_print_devices(&info->info, p); *start = buffer + offset; pos = p - buffer - offset; if (pos > length) pos = length; return pos; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; return sprintf(buf, "%d\n", info->control & EESOX_TERM_ENABLE ? 1 : 0); } static ssize_t eesoxscsi_store_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct expansion_card *ec = ECARD_DEV(dev); struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; unsigned long flags; if (len > 1) { spin_lock_irqsave(host->host_lock, flags); if (buf[0] != '0') { info->control |= EESOX_TERM_ENABLE; } else { info->control &= ~EESOX_TERM_ENABLE; } writeb(info->control, info->ctl_port); spin_unlock_irqrestore(host->host_lock, flags); } return len; } static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, eesoxscsi_show_term, eesoxscsi_store_term); static struct scsi_host_template eesox_template = { .module = THIS_MODULE, .proc_info = eesoxscsi_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, .eh_host_reset_handler = fas216_eh_host_reset, .eh_bus_reset_handler = fas216_eh_bus_reset, .eh_device_reset_handler = fas216_eh_device_reset, .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", }; static int __devinit eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) { struct Scsi_Host *host; struct eesoxscsi_info *info; void __iomem *base; int ret; ret = ecard_request_resources(ec); if (ret) goto out; base = 
ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!base) { ret = -ENOMEM; goto out_region; } host = scsi_host_alloc(&eesox_template, sizeof(struct eesoxscsi_info)); if (!host) { ret = -ENOMEM; goto out_region; } ecard_set_drvdata(ec, host); info = (struct eesoxscsi_info *)host->hostdata; info->ec = ec; info->base = base; info->ctl_port = base + EESOX_CONTROL; info->control = term[ec->slot_no] ? EESOX_TERM_ENABLE : 0; writeb(info->control, info->ctl_port); info->info.scsi.io_base = base + EESOX_FAS216_OFFSET; info->info.scsi.io_shift = EESOX_FAS216_SHIFT; info->info.scsi.irq = ec->irq; info->info.scsi.dma = ec->dma; info->info.ifcfg.clockrate = 40; /* MHz */ info->info.ifcfg.select_timeout = 255; info->info.ifcfg.asyncperiod = 200; /* ns */ info->info.ifcfg.sync_max_depth = 7; info->info.ifcfg.cntl3 = CNTL3_FASTSCSI | CNTL3_FASTCLK; info->info.ifcfg.disconnect_ok = 1; info->info.ifcfg.wide_max_size = 0; info->info.ifcfg.capabilities = FASCAP_PSEUDODMA; info->info.dma.setup = eesoxscsi_dma_setup; info->info.dma.pseudo = eesoxscsi_dma_pseudo; info->info.dma.stop = eesoxscsi_dma_stop; ec->irqaddr = base + EESOX_DMASTAT; ec->irqmask = EESOX_STAT_INTR; ecard_setirq(ec, &eesoxscsi_ops, info); device_create_file(&ec->dev, &dev_attr_bus_term); ret = fas216_init(host); if (ret) goto out_free; ret = request_irq(ec->irq, eesoxscsi_intr, 0, "eesoxscsi", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); goto out_remove; } if (info->info.scsi.dma != NO_DMA) { if (request_dma(info->info.scsi.dma, "eesox")) { printk("scsi%d: DMA%d not free, DMA disabled\n", host->host_no, info->info.scsi.dma); info->info.scsi.dma = NO_DMA; } else { set_dma_speed(info->info.scsi.dma, 180); info->info.ifcfg.capabilities |= FASCAP_DMA; info->info.ifcfg.cntl3 |= CNTL3_BS8; } } ret = fas216_add(host, &ec->dev); if (ret == 0) goto out; if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, host); out_remove: fas216_remove(host); out_free: 
device_remove_file(&ec->dev, &dev_attr_bus_term); scsi_host_put(host); out_region: ecard_release_resources(ec); out: return ret; } static void __devexit eesoxscsi_remove(struct expansion_card *ec) { struct Scsi_Host *host = ecard_get_drvdata(ec); struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata; ecard_set_drvdata(ec, NULL); fas216_remove(host); if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); free_irq(ec->irq, info); device_remove_file(&ec->dev, &dev_attr_bus_term); fas216_release(host); scsi_host_put(host); ecard_release_resources(ec); } static const struct ecard_id eesoxscsi_cids[] = { { MANU_EESOX, PROD_EESOX_SCSI2 }, { 0xffff, 0xffff }, }; static struct ecard_driver eesoxscsi_driver = { .probe = eesoxscsi_probe, .remove = __devexit_p(eesoxscsi_remove), .id_table = eesoxscsi_cids, .drv = { .name = "eesoxscsi", }, }; static int __init eesox_init(void) { return ecard_register_driver(&eesoxscsi_driver); } static void __exit eesox_exit(void) { ecard_remove_driver(&eesoxscsi_driver); } module_init(eesox_init); module_exit(eesox_exit); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("EESOX 'Fast' SCSI driver for Acorn machines"); module_param_array(term, int, NULL, 0); MODULE_PARM_DESC(term, "SCSI bus termination"); MODULE_LICENSE("GPL");
gpl-2.0
TeamJB/kernel_samsung_smdk4412
drivers/media/video/samsung/mali_r3p0/common/mali_memory.c
195
45101
/*
 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "mali_kernel_common.h"
#include "mali_kernel_descriptor_mapping.h"
#include "mali_mem_validation.h"
#include "mali_memory.h"
#include "mali_mmu_page_directory.h"
#include "mali_kernel_memory_engine.h"
#include "mali_block_allocator.h"
#include "mali_kernel_mem_os.h"
#include "mali_session.h"
#include "mali_l2_cache.h"
#include "mali_cluster.h"
#include "mali_group.h"

#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
#include "ump_kernel_interface.h"
#endif

/* kernel side OS functions and user-kernel interface */
#include "mali_osk.h"
#include "mali_osk_mali.h"
#include "mali_ukk.h"
#include "mali_osk_list.h"
#include "mali_osk_bitops.h"

/**
 * Per-session memory descriptor mapping table sizes
 */
#define MALI_MEM_DESCRIPTORS_INIT 64
#define MALI_MEM_DESCRIPTORS_MAX 65536

/* Bookkeeping for one dedicated (reserved) physical memory region; kept on a
 * singly linked list so the regions can be un-requested on module unload. */
typedef struct dedicated_memory_info
{
	u32 base;
	u32 size;
	struct dedicated_memory_info * next;
} dedicated_memory_info;

/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
typedef struct ump_mem_allocation
{
	mali_allocation_engine * engine;
	mali_memory_allocation * descriptor;
	u32 initial_offset;
	u32 size_allocated;
	ump_dd_handle ump_mem;
} ump_mem_allocation;
#endif

typedef struct external_mem_allocation
{
	mali_allocation_engine * engine;
	mali_memory_allocation * descriptor;
	u32 initial_offset;
	u32 size;
} external_mem_allocation;

/**
 * @brief Internal function for unmapping memory
 *
 * Worker function for unmapping memory from a user-process. We assume that the
 * session/descriptor's lock was obtained before entry. For example, the
 * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
 * function to do the actual unmapping. mali_memory_core_session_end() could
 * also call this directly (depending on compilation options), having locked
 * the descriptor.
 *
 * This function will fail if it is unable to put the MMU in stall mode (which
 * might be the case if a page fault is also being processed).
 *
 * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
 * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
 */
static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );

#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
static void ump_memory_release(void * ctx, void * handle);
static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/

static void external_memory_release(void * ctx, void * handle);
static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);

/* nop functions */

/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
static void mali_address_manager_release(mali_memory_allocation * descriptor);

/* MMU variables */

/* One chunk of physical pages used to back MMU page tables, with a bitmap
 * tracking which pages within the chunk are in use. */
typedef struct mali_mmu_page_table_allocation
{
	_mali_osk_list_t list;
	u32 * usage_map;
	u32 usage_count;
	u32 num_pages;
	mali_page_table_block pages;
} mali_mmu_page_table_allocation;

typedef struct mali_mmu_page_table_allocations
{
	_mali_osk_lock_t *lock;
	_mali_osk_list_t partial;
	_mali_osk_list_t full;
	/* we never hold on to a empty allocation */
} mali_mmu_page_table_allocations;

/* Address manager that writes Mali-side MMU page tables. */
static mali_kernel_mem_address_manager mali_address_manager =
{
	mali_address_manager_allocate, /* allocate */
	mali_address_manager_release,  /* release */
	mali_address_manager_map,      /* map_physical */
	NULL                           /* unmap_physical not present*/
};

/* the mmu page table cache */
static struct mali_mmu_page_table_allocations page_table_cache;

/* Address manager that maps memory into the user process' address space. */
static mali_kernel_mem_address_manager process_address_manager =
{
	_mali_osk_mem_mapregion_init,  /* allocate */
	_mali_osk_mem_mapregion_term,  /* release */
	_mali_osk_mem_mapregion_map,   /* map_physical */
	_mali_osk_mem_mapregion_unmap  /* unmap_physical */
};

static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
static void mali_mmu_page_table_cache_destroy(void);

/* Module-wide state: the allocation engine, the ordered list of physical
 * allocators, and the list of dedicated regions to release on unload. */
static mali_allocation_engine memory_engine = NULL;
static mali_physical_memory_allocator * physical_memory_allocators = NULL;
static dedicated_memory_info * mem_region_registrations = NULL;

/* called during module init */
_mali_osk_errcode_t mali_memory_initialize(void)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_PRINT(2, ("Memory system initializing\n"));

	err = mali_mmu_page_table_cache_create();
	if(_MALI_OSK_ERR_OK != err)
	{
		MALI_ERROR(err);
	}

	memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
	MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);

	MALI_SUCCESS;
}

/* called if/when our module is unloaded */
void mali_memory_terminate(void)
{
	MALI_DEBUG_PRINT(2, ("Memory system terminating\n"));

	mali_mmu_page_table_cache_destroy();

	/* Release every dedicated memory region we requested at registration time */
	while ( NULL != mem_region_registrations)
	{
		dedicated_memory_info * m;
		m = mem_region_registrations;
		mem_region_registrations = m->next;
		_mali_osk_mem_unreqregion(m->base, m->size);
		_mali_osk_free(m);
	}

	/* Destroy all physical memory allocators */
	while ( NULL != physical_memory_allocators)
	{
		mali_physical_memory_allocator * m;
		m = physical_memory_allocators;
		physical_memory_allocators = m->next;
		m->destroy(m);
	}

	if (NULL != memory_engine)
	{
		mali_allocation_engine_destroy(memory_engine);
		memory_engine = NULL;
	}
}

/**
 * Set up per-session memory state: descriptor mapping table, session memory
 * lock and the list head for this session's allocations.
 */
_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
{
	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));

	/* create descriptor mapping table */
	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);

	if (NULL == session_data->descriptor_mapping)
	{
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	session_data->memory_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_SESSION);
	if (NULL == session_data->memory_lock)
	{
		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
		/* NOTE(review): freeing session_data here while the caller owns it
		 * looks suspicious — confirm ownership with the caller of
		 * mali_memory_session_begin(). Behavior kept as-is. */
		_mali_osk_free(session_data);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* Init the session's memory allocation list */
	_MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );

	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
	MALI_SUCCESS;
}

/* Callback run for every live descriptor left in a session's mapping table
 * at session end: release its memory and free the descriptor. */
static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
{
	mali_memory_allocation * descriptor;

	descriptor = (mali_memory_allocation*)map_target;

	MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
	MALI_DEBUG_ASSERT(descriptor);

	mali_allocation_engine_release_memory(memory_engine, descriptor);
	_mali_osk_free(descriptor);
}

/**
 * Tear down per-session memory state. Frees any allocations still on the
 * session list (retrying while the MMU cannot be stalled), cleans up the
 * descriptor mapping table and destroys the session lock.
 */
void mali_memory_session_end(struct mali_session_data *session_data)
{
	MALI_DEBUG_PRINT(3, ("MMU session end\n"));

	if (NULL == session_data)
	{
		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
		return;
	}

#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
#else
	{
		_mali_osk_errcode_t err;
		err = _MALI_OSK_ERR_BUSY;
		while (err == _MALI_OSK_ERR_BUSY)
		{
			/* Lock the session so we can modify the memory list */
			_mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
			err = _MALI_OSK_ERR_OK;

			/* Free all memory engine allocations */
			if (0 == _mali_osk_list_empty(&session_data->memory_head))
			{
				mali_memory_allocation *descriptor;
				mali_memory_allocation *temp;
				_mali_uk_mem_munmap_s unmap_args;

				MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));

				unmap_args.ctx = session_data;

				/* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
				_MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
				{
					/* BUGFIX: the original passed descriptor->size twice, giving four
					 * arguments for three conversion specifiers, so the mapping
					 * address was never printed. */
					MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
					                     descriptor->mali_address, descriptor->size, descriptor->mapping) );

					/* ASSERT that the descriptor's lock references the correct thing */
					MALI_DEBUG_ASSERT( descriptor->lock == session_data->memory_lock );
					/* Therefore, we have already locked the descriptor */

					unmap_args.size = descriptor->size;
					unmap_args.mapping = descriptor->mapping;
					unmap_args.cookie = (u32)descriptor;

					/*
					 * This removes the descriptor from the list, and frees the descriptor
					 *
					 * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
					 * the only OS we are aware of that requires indirect MMAP also has
					 * implicit mmap cleanup.
					 */
					err = _mali_ukk_mem_munmap_internal( &unmap_args );

					if (err == _MALI_OSK_ERR_BUSY)
					{
						_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
						/*
						 * Reason for this;
						 * We where unable to stall the MMU, probably because we are in page fault handling.
						 * Sleep for a while with the session lock released, then try again.
						 * Abnormal termination of programs with running Mali jobs is a normal reason for this.
						 */
						_mali_osk_time_ubusydelay(10);
						break; /* Will jump back into: "while (err == _MALI_OSK_ERR_BUSY)" */
					}
				}
			}
		}
		/* Assert that we really did free everything */
		MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
	}
#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
#else
	/* Lock the session so we can modify the memory list */
	_mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */

	if (NULL != session_data->descriptor_mapping)
	{
		mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
		session_data->descriptor_mapping = NULL;
	}

	_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );

	/**
	 * @note Could the VMA close handler mean that we use the session data after it was freed?
	 * In which case, would need to refcount the session data, and free on VMA close
	 */

	/* Free the lock */
	_mali_osk_lock_term( session_data->memory_lock );

	return;
}

/**
 * Register an OS-memory backed physical allocator for the given resource and
 * insert it into the ordered allocator list (ascending alloc_order;
 * same-order entries are last-in-first).
 */
_mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
{
	mali_physical_memory_allocator * allocator;
	mali_physical_memory_allocator ** next_allocator_list;

	u32 alloc_order = resource->alloc_order;

	allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
	if (NULL == allocator)
	{
		MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	allocator->alloc_order = alloc_order;

	/* link in the allocator: insertion into ordered list
	 * resources of the same alloc_order will be Last-in-first */
	next_allocator_list = &physical_memory_allocators;
	while (NULL != *next_allocator_list && (*next_allocator_list)->alloc_order < alloc_order )
	{
		next_allocator_list = &((*next_allocator_list)->next);
	}
	allocator->next = (*next_allocator_list);
	(*next_allocator_list) = allocator;

	MALI_SUCCESS;
}

/**
 * Register a dedicated (reserved physical range) memory allocator for the
 * given resource: request the region from the OS, create a block allocator
 * over it, remember cleanup info and insert into the ordered allocator list.
 */
_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
{
	mali_physical_memory_allocator * allocator;
	mali_physical_memory_allocator ** next_allocator_list;
	dedicated_memory_info * cleanup_data;

	u32 alloc_order = resource->alloc_order;

	/* do the low level linux operation first */

	/* Request ownership of the memory */
	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
	{
		MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* create generic block allocator object to handle it */
	allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
	if (NULL == allocator)
	{
		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
		_mali_osk_mem_unreqregion(resource->base, resource->size);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* save low level cleanup info */
	allocator->alloc_order = alloc_order;

	cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
	if (NULL == cleanup_data)
	{
		_mali_osk_mem_unreqregion(resource->base, resource->size);
		allocator->destroy(allocator);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	cleanup_data->base = resource->base;
	cleanup_data->size = resource->size;

	cleanup_data->next = mem_region_registrations;
	mem_region_registrations = cleanup_data;

	/* link in the allocator: insertion into ordered list
	 * resources of the same alloc_order will be Last-in-first */
	next_allocator_list = &physical_memory_allocators;
	while ( NULL != *next_allocator_list && (*next_allocator_list)->alloc_order < alloc_order )
	{
		next_allocator_list = &((*next_allocator_list)->next);
	}
	allocator->next = (*next_allocator_list);
	(*next_allocator_list) = allocator;

	MALI_SUCCESS;
}

#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info) { ump_dd_handle ump_mem; u32 nr_blocks; u32 i; ump_dd_physical_block * ump_blocks; ump_mem_allocation *ret_allocation; MALI_DEBUG_ASSERT_POINTER(ctx); MALI_DEBUG_ASSERT_POINTER(engine); MALI_DEBUG_ASSERT_POINTER(descriptor); MALI_DEBUG_ASSERT_POINTER(alloc_info); ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) ); if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE; ump_mem = (ump_dd_handle)ctx; MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n")); nr_blocks = ump_dd_phys_block_count_get(ump_mem); MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks)); if (nr_blocks == 0) { MALI_DEBUG_PRINT(1, ("No block count\n")); _mali_osk_free( ret_allocation ); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks ); if ( NULL==ump_blocks ) { _mali_osk_free( ret_allocation ); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) { _mali_osk_free(ump_blocks); _mali_osk_free( ret_allocation ); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } /* Store away the initial offset for unmapping purposes */ ret_allocation->initial_offset = *offset; for(i=0; i<nr_blocks; ++i) { MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size)); if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size )) { u32 size_allocated = *offset - ret_allocation->initial_offset; MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n")); /* unmap all previous blocks (if any) */ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 ); _mali_osk_free(ump_blocks); 
_mali_osk_free(ret_allocation); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } *offset += ump_blocks[i].size; } if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE) { /* Map in an extra virtual guard page at the end of the VMA */ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n")); if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE )) { u32 size_allocated = *offset - ret_allocation->initial_offset; MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n")); /* unmap all previous blocks (if any) */ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 ); _mali_osk_free(ump_blocks); _mali_osk_free(ret_allocation); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } *offset += _MALI_OSK_MALI_PAGE_SIZE; } _mali_osk_free( ump_blocks ); ret_allocation->engine = engine; ret_allocation->descriptor = descriptor; ret_allocation->ump_mem = ump_mem; ret_allocation->size_allocated = *offset - ret_allocation->initial_offset; alloc_info->ctx = NULL; alloc_info->handle = ret_allocation; alloc_info->next = NULL; alloc_info->release = ump_memory_release; return MALI_MEM_ALLOC_FINISHED; } static void ump_memory_release(void * ctx, void * handle) { ump_dd_handle ump_mem; ump_mem_allocation *allocation; allocation = (ump_mem_allocation *)handle; MALI_DEBUG_ASSERT_POINTER( allocation ); ump_mem = allocation->ump_mem; MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem); /* At present, this is a no-op. But, it allows the mali_address_manager to * do unmapping of a subrange in future. 
*/ mali_allocation_engine_unmap_physical( allocation->engine, allocation->descriptor, allocation->initial_offset, allocation->size_allocated, (_mali_osk_mem_mapregion_flags_t)0 ); _mali_osk_free( allocation ); ump_dd_reference_release(ump_mem) ; return; } _mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args ) { ump_dd_handle ump_mem; mali_physical_memory_allocator external_memory_allocator; struct mali_session_data *session_data; mali_memory_allocation * descriptor; int md; MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); session_data = (struct mali_session_data *)args->ctx; MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS); /* check arguments */ /* NULL might be a valid Mali address */ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); /* size must be a multiple of the system page size */ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); MALI_DEBUG_PRINT(3, ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n", args->secure_id, args->mali_address, args->size)); ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ; if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT); descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation)); if (NULL == descriptor) { ump_dd_reference_release(ump_mem); MALI_ERROR(_MALI_OSK_ERR_NOMEM); } descriptor->size = args->size; descriptor->mapping = NULL; descriptor->mali_address = args->mali_address; descriptor->mali_addr_mapping_info = (void*)session_data; descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */ descriptor->lock = session_data->memory_lock; if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE; } _mali_osk_list_init( &descriptor->list ); if (_MALI_OSK_ERR_OK != 
mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md)) { ump_dd_reference_release(ump_mem); _mali_osk_free(descriptor); MALI_ERROR(_MALI_OSK_ERR_FAULT); } external_memory_allocator.allocate = ump_memory_commit; external_memory_allocator.allocate_page_table_block = NULL; external_memory_allocator.ctx = ump_mem; external_memory_allocator.name = "UMP Memory"; external_memory_allocator.next = NULL; _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL)) { _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); mali_descriptor_mapping_free(session_data->descriptor_mapping, md); ump_dd_reference_release(ump_mem); _mali_osk_free(descriptor); MALI_ERROR(_MALI_OSK_ERR_NOMEM); } _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); args->cookie = md; MALI_DEBUG_PRINT(5,("Returning from UMP attach\n")); /* All OK */ MALI_SUCCESS; } _mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args ) { mali_memory_allocation * descriptor; struct mali_session_data *session_data; MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); session_data = (struct mali_session_data *)args->ctx; MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS); if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor)) { MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie)); MALI_ERROR(_MALI_OSK_ERR_FAULT); } descriptor = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie); if (NULL != descriptor) { _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW ); mali_allocation_engine_release_memory(memory_engine, descriptor); _mali_osk_lock_signal( session_data->memory_lock, 
_MALI_OSK_LOCKMODE_RW ); _mali_osk_free(descriptor); } MALI_SUCCESS; } #endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */ static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info) { u32 * data; external_mem_allocation * ret_allocation; MALI_DEBUG_ASSERT_POINTER(ctx); MALI_DEBUG_ASSERT_POINTER(engine); MALI_DEBUG_ASSERT_POINTER(descriptor); MALI_DEBUG_ASSERT_POINTER(alloc_info); ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) ); if ( NULL == ret_allocation ) { return MALI_MEM_ALLOC_INTERNAL_FAILURE; } data = (u32*)ctx; ret_allocation->engine = engine; ret_allocation->descriptor = descriptor; ret_allocation->initial_offset = *offset; alloc_info->ctx = NULL; alloc_info->handle = ret_allocation; alloc_info->next = NULL; alloc_info->release = external_memory_release; MALI_DEBUG_PRINT(5, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1])); if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1])) { MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n")); _mali_osk_free(ret_allocation); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } *offset += data[1]; if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE) { /* Map in an extra virtual guard page at the end of the VMA */ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n")); if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE)) { u32 size_allocated = *offset - ret_allocation->initial_offset; MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n")); /* unmap what we previously mapped */ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, 
(_mali_osk_mem_mapregion_flags_t)0 ); _mali_osk_free(ret_allocation); return MALI_MEM_ALLOC_INTERNAL_FAILURE; } *offset += _MALI_OSK_MALI_PAGE_SIZE; } ret_allocation->size = *offset - ret_allocation->initial_offset; return MALI_MEM_ALLOC_FINISHED; } static void external_memory_release(void * ctx, void * handle) { external_mem_allocation * allocation; allocation = (external_mem_allocation *) handle; MALI_DEBUG_ASSERT_POINTER( allocation ); /* At present, this is a no-op. But, it allows the mali_address_manager to * do unmapping of a subrange in future. */ mali_allocation_engine_unmap_physical( allocation->engine, allocation->descriptor, allocation->initial_offset, allocation->size, (_mali_osk_mem_mapregion_flags_t)0 ); _mali_osk_free( allocation ); return; } _mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args ) { mali_physical_memory_allocator external_memory_allocator; struct mali_session_data *session_data; u32 info[2]; mali_memory_allocation * descriptor; int md; MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); session_data = (struct mali_session_data *)args->ctx; MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS); external_memory_allocator.allocate = external_memory_commit; external_memory_allocator.allocate_page_table_block = NULL; external_memory_allocator.ctx = &info[0]; external_memory_allocator.name = "External Memory"; external_memory_allocator.next = NULL; /* check arguments */ /* NULL might be a valid Mali address */ if ( ! 
args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); /* size must be a multiple of the system page size */ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); MALI_DEBUG_PRINT(3, ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n", (void*)args->phys_addr, (void*)(args->phys_addr + args->size -1), (void*)args->mali_address) ); /* Validate the mali physical range */ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) { return _MALI_OSK_ERR_FAULT; } info[0] = args->phys_addr; info[1] = args->size; descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation)); if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM); descriptor->size = args->size; descriptor->mapping = NULL; descriptor->mali_address = args->mali_address; descriptor->mali_addr_mapping_info = (void*)session_data; descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */ descriptor->lock = session_data->memory_lock; if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE; } _mali_osk_list_init( &descriptor->list ); _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL)) { _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); _mali_osk_free(descriptor); MALI_ERROR(_MALI_OSK_ERR_NOMEM); } _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md)) { mali_allocation_engine_release_memory(memory_engine, descriptor); _mali_osk_free(descriptor); MALI_ERROR(_MALI_OSK_ERR_FAULT); } args->cookie = md; MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n")); /* All OK */ MALI_SUCCESS; } _mali_osk_errcode_t _mali_ukk_unmap_external_mem( 
_mali_uk_unmap_external_mem_s *args ) { mali_memory_allocation * descriptor; void* old_value; struct mali_session_data *session_data; MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); session_data = (struct mali_session_data *)args->ctx; MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS); if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor)) { MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie)); MALI_ERROR(_MALI_OSK_ERR_FAULT); } old_value = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie); if (NULL != old_value) { _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW ); mali_allocation_engine_release_memory(memory_engine, descriptor); _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW ); _mali_osk_free(descriptor); } MALI_SUCCESS; } _mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args ) { MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* staring at 1GB, causing this layout: (0-1GB unused)(1GB-3G usage by Mali)(3G-4G unused) */ MALI_SUCCESS; } _mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args ) { MALI_DEBUG_ASSERT_POINTER(args); MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS); MALI_SUCCESS; } static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor) { struct mali_session_data *session_data; u32 actual_size; MALI_DEBUG_ASSERT_POINTER(descriptor); session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info; actual_size = descriptor->size; if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE) { actual_size += _MALI_OSK_MALI_PAGE_SIZE; } return 
mali_mmu_pagedir_map(session_data->page_directory, descriptor->mali_address, actual_size); } static void mali_address_manager_release(mali_memory_allocation * descriptor) { const u32 illegal_mali_address = 0xffffffff; struct mali_session_data *session_data; MALI_DEBUG_ASSERT_POINTER(descriptor); /* It is allowed to call this function several times on the same descriptor. When memory is released we set the illegal_mali_address so we can early out here. */ if ( illegal_mali_address == descriptor->mali_address) return; session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info; mali_mmu_pagedir_unmap(session_data->page_directory, descriptor->mali_address, descriptor->size); descriptor->mali_address = illegal_mali_address ; } static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size) { struct mali_session_data *session_data; u32 mali_address; MALI_DEBUG_ASSERT_POINTER(descriptor); MALI_DEBUG_ASSERT_POINTER(phys_addr); session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info; MALI_DEBUG_ASSERT_POINTER(session_data); mali_address = descriptor->mali_address + offset; MALI_DEBUG_PRINT(7, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", *phys_addr, mali_address, size)); mali_mmu_pagedir_update(session_data->page_directory, mali_address, *phys_addr, size); MALI_SUCCESS; } /* This handler registered to mali_mmap for MMU builds */ _mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args ) { struct mali_session_data *session_data; mali_memory_allocation * descriptor; /* validate input */ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); } /* Unpack arguments */ session_data = (struct mali_session_data *)args->ctx; /* validate input */ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); } descriptor = 
(mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) ); if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); } descriptor->size = args->size; descriptor->mali_address = args->phys_addr; descriptor->mali_addr_mapping_info = (void*)session_data; descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE; descriptor->lock = session_data->memory_lock; _mali_osk_list_init( &descriptor->list ); _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head)) { /* We do not FLUSH nor TLB_ZAP on MMAP, since we do both of those on job start*/ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); args->mapping = descriptor->mapping; args->cookie = (u32)descriptor; MALI_DEBUG_PRINT(7, ("MMAP OK\n")); MALI_SUCCESS; } else { _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW); /* OOM, but not a fatal error */ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n")); _mali_osk_free(descriptor); /* Linux will free the CPU address allocation, userspace client the Mali address allocation */ MALI_ERROR(_MALI_OSK_ERR_FAULT); } } static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args ) { struct mali_session_data *session_data; mali_memory_allocation * descriptor; u32 num_groups = mali_group_get_glob_num_groups(); struct mali_group *group; u32 i; descriptor = (mali_memory_allocation *)args->cookie; MALI_DEBUG_ASSERT_POINTER(descriptor); /** @note args->context unused; we use the memory_session from the cookie */ /* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. 
However, they could be used to check the descriptor at this point. */ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info; MALI_DEBUG_ASSERT_POINTER(session_data); /* Unmapping the memory from the mali virtual address space. It is allowed to call this function severeal times, which might happen if zapping below fails. */ mali_allocation_engine_release_pt1_mali_pagetables_unmap(memory_engine, descriptor); #ifdef MALI_UNMAP_FLUSH_ALL_MALI_L2 { u32 number_of_clusters = mali_cluster_get_glob_num_clusters(); for (i = 0; i < number_of_clusters; i++) { struct mali_cluster *cluster; cluster = mali_cluster_get_global_cluster(i); if( mali_cluster_power_is_enabled_get(cluster) ) { mali_cluster_l2_cache_invalidate_all_force(cluster); } } } #endif for (i = 0; i < num_groups; i++) { group = mali_group_get_glob_group(i); mali_group_lock(group); mali_group_remove_session_if_unused(group, session_data); if (mali_group_get_session(group) == session_data) { /* The Zap also does the stall and disable_stall */ mali_bool zap_success = mali_mmu_zap_tlb(mali_group_get_mmu(group)); if (MALI_TRUE != zap_success) { MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. 
Doing pagefault handling.\n")); mali_group_bottom_half(group, GROUP_EVENT_MMU_PAGE_FAULT); /* The bottom half will also do the unlock */ continue; } } mali_group_unlock(group); } /* Removes the descriptor from the session's memory list, releases physical memory, releases descriptor */ mali_allocation_engine_release_pt2_physical_memory_free(memory_engine, descriptor); _mali_osk_free(descriptor); return _MALI_OSK_ERR_OK; } /* Handler for unmapping memory for MMU builds */ _mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args ) { mali_memory_allocation * descriptor; _mali_osk_lock_t *descriptor_lock; _mali_osk_errcode_t err; descriptor = (mali_memory_allocation *)args->cookie; MALI_DEBUG_ASSERT_POINTER(descriptor); /** @note args->context unused; we use the memory_session from the cookie */ /* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. However, they could be used to check the descriptor at this point. */ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)descriptor->mali_addr_mapping_info); descriptor_lock = descriptor->lock; /* should point to the session data lock... */ err = _MALI_OSK_ERR_BUSY; while (err == _MALI_OSK_ERR_BUSY) { if (descriptor_lock) { _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW ); } err = _mali_ukk_mem_munmap_internal( args ); if (descriptor_lock) { _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW ); } if (err == _MALI_OSK_ERR_BUSY) { /* * Reason for this; * We where unable to stall the MMU, probably because we are in page fault handling. * Sleep for a while with the session lock released, then try again. * Abnormal termination of programs with running Mali jobs is a normal reason for this. 
*/ _mali_osk_time_ubusydelay(10); } } return err; } u32 _mali_ukk_report_memory_usage(void) { return mali_allocation_engine_memory_usage(physical_memory_allocators); } _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping) { _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); if (0 == _mali_osk_list_empty(&page_table_cache.partial)) { mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list); int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages); MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number)); _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map); alloc->usage_count++; if (alloc->num_pages == alloc->usage_count) { /* full, move alloc to full list*/ _mali_osk_list_move(&alloc->list, &page_table_cache.full); } _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base; *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping); MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page )); MALI_SUCCESS; } else { mali_mmu_page_table_allocation * alloc; /* no free pages, allocate a new one */ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation)); if (NULL == alloc) { _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); *table_page = MALI_INVALID_PAGE; MALI_ERROR(_MALI_OSK_ERR_NOMEM); } _MALI_OSK_INIT_LIST_HEAD(&alloc->list); if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators)) { MALI_DEBUG_PRINT(1, ("No more memory for page tables\n")); _mali_osk_free(alloc); _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); *table_page = MALI_INVALID_PAGE; *mapping = NULL; 
MALI_ERROR(_MALI_OSK_ERR_NOMEM); } /* create the usage map */ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE; alloc->usage_count = 1; MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages)); alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG-1) / BITS_PER_LONG) * sizeof(unsigned long)); if (NULL == alloc->usage_map) { MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n")); alloc->pages.release(&alloc->pages); _mali_osk_free(alloc); _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); *table_page = MALI_INVALID_PAGE; *mapping = NULL; MALI_ERROR(_MALI_OSK_ERR_NOMEM); } _mali_osk_set_nonatomic_bit(0, alloc->usage_map); if (alloc->num_pages > 1) { _mali_osk_list_add(&alloc->list, &page_table_cache.partial); } else { _mali_osk_list_add(&alloc->list, &page_table_cache.full); } _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); *table_page = alloc->pages.phys_base; /* return the first page */ *mapping = alloc->pages.mapping; /* Mapping for first page */ MALI_DEBUG_PRINT(4, ("Page table allocated: VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page )); MALI_SUCCESS; } } void mali_mmu_release_table_page(u32 pa) { mali_mmu_page_table_allocation * alloc, * temp_alloc; MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa)); MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa)); _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); /* find the entry this address belongs to */ /* first check the partial list */ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list) { u32 start = alloc->pages.phys_base; u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE; if (pa >= start && pa <= last) { MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - 
start)/MALI_MMU_PAGE_SIZE, alloc->usage_map)); _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map); alloc->usage_count--; _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE); if (0 == alloc->usage_count) { /* empty, release whole page alloc */ _mali_osk_list_del(&alloc->list); alloc->pages.release(&alloc->pages); _mali_osk_free(alloc->usage_map); _mali_osk_free(alloc); } _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa)); return; } } /* the check the full list */ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list) { u32 start = alloc->pages.phys_base; u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE; if (pa >= start && pa <= last) { _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map); alloc->usage_count--; _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE); if (0 == alloc->usage_count) { /* empty, release whole page alloc */ _mali_osk_list_del(&alloc->list); alloc->pages.release(&alloc->pages); _mali_osk_free(alloc->usage_map); _mali_osk_free(alloc); } else { /* transfer to partial list */ _mali_osk_list_move(&alloc->list, &page_table_cache.partial); } _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa)); return; } } MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa)); _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW); } static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void) { page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE); MALI_CHECK_NON_NULL( page_table_cache.lock, 
_MALI_OSK_ERR_FAULT ); _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial); _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full); MALI_SUCCESS; } static void mali_mmu_page_table_cache_destroy(void) { mali_mmu_page_table_allocation * alloc, *temp; _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list) { MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count)); _mali_osk_list_del(&alloc->list); alloc->pages.release(&alloc->pages); _mali_osk_free(alloc->usage_map); _mali_osk_free(alloc); } MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n")); _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list) { MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count)); _mali_osk_list_del(&alloc->list); alloc->pages.release(&alloc->pages); _mali_osk_free(alloc->usage_map); _mali_osk_free(alloc); } _mali_osk_lock_term(page_table_cache.lock); }
gpl-2.0
linuxium/ubuntu-xenial
drivers/staging/fsl-mc/bus/mc-sys.c
195
12753
/* Copyright 2013-2014 Freescale Semiconductor Inc. * * I/O services to send MC commands to the MC hardware * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the above-listed copyright holders nor the * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/mc.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/device.h>

#include "dpmcp.h"

/**
 * Timeout in milliseconds to wait for the completion of an MC command
 */
#define MC_CMD_COMPLETION_TIMEOUT_MS	500

/*
 * usleep_range() min and max values used to throttle down polling
 * iterations while waiting for MC command completion
 */
#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS    10
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500

/* Extract the 16-bit command-ID field from an MC command header word */
#define MC_CMD_HDR_READ_CMDID(_hdr) \
	((u16)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S))

/**
 * Creates an MC I/O object
 *
 * @dev: device to be associated with the MC I/O object
 * @mc_portal_phys_addr: physical address of the MC portal to use
 * @mc_portal_size: size in bytes of the MC portal
 * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
 * object or NULL if none.
 * @flags: flags for the new MC I/O object
 * @new_mc_io: Area to return pointer to newly created MC I/O object
 *
 * Returns '0' on Success; Error code otherwise.
*/ int __must_check fsl_create_mc_io(struct device *dev, phys_addr_t mc_portal_phys_addr, u32 mc_portal_size, struct fsl_mc_device *dpmcp_dev, u32 flags, struct fsl_mc_io **new_mc_io) { int error; struct fsl_mc_io *mc_io; void __iomem *mc_portal_virt_addr; struct resource *res; mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL); if (!mc_io) return -ENOMEM; mc_io->dev = dev; mc_io->flags = flags; mc_io->portal_phys_addr = mc_portal_phys_addr; mc_io->portal_size = mc_portal_size; if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) spin_lock_init(&mc_io->spinlock); else mutex_init(&mc_io->mutex); res = devm_request_mem_region(dev, mc_portal_phys_addr, mc_portal_size, "mc_portal"); if (!res) { dev_err(dev, "devm_request_mem_region failed for MC portal %#llx\n", mc_portal_phys_addr); return -EBUSY; } mc_portal_virt_addr = devm_ioremap_nocache(dev, mc_portal_phys_addr, mc_portal_size); if (!mc_portal_virt_addr) { dev_err(dev, "devm_ioremap_nocache failed for MC portal %#llx\n", mc_portal_phys_addr); return -ENXIO; } mc_io->portal_virt_addr = mc_portal_virt_addr; if (dpmcp_dev) { error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); if (error < 0) goto error_destroy_mc_io; } *new_mc_io = mc_io; return 0; error_destroy_mc_io: fsl_destroy_mc_io(mc_io); return error; } EXPORT_SYMBOL_GPL(fsl_create_mc_io); /** * Destroys an MC I/O object * * @mc_io: MC I/O object to destroy */ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) { struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; if (dpmcp_dev) fsl_mc_io_unset_dpmcp(mc_io); devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); devm_release_mem_region(mc_io->dev, mc_io->portal_phys_addr, mc_io->portal_size); mc_io->portal_virt_addr = NULL; devm_kfree(mc_io->dev, mc_io); } EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, struct fsl_mc_device *dpmcp_dev) { int error; if (WARN_ON(!dpmcp_dev)) return -EINVAL; if (WARN_ON(mc_io->dpmcp_dev)) return -EINVAL; if (WARN_ON(dpmcp_dev->mc_io)) return -EINVAL; error = 
dpmcp_open(mc_io, 0, dpmcp_dev->obj_desc.id, &dpmcp_dev->mc_handle); if (error < 0) return error; mc_io->dpmcp_dev = dpmcp_dev; dpmcp_dev->mc_io = mc_io; return 0; } EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) { int error; struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; if (WARN_ON(!dpmcp_dev)) return; if (WARN_ON(dpmcp_dev->mc_io != mc_io)) return; error = dpmcp_close(mc_io, 0, dpmcp_dev->mc_handle); if (error < 0) { dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n", error); } mc_io->dpmcp_dev = NULL; dpmcp_dev->mc_io = NULL; } EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); static int mc_status_to_error(enum mc_cmd_status status) { static const int mc_status_to_error_map[] = { [MC_CMD_STATUS_OK] = 0, [MC_CMD_STATUS_AUTH_ERR] = -EACCES, [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM, [MC_CMD_STATUS_DMA_ERR] = -EIO, [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO, [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT, [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL, [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM, [MC_CMD_STATUS_BUSY] = -EBUSY, [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP, [MC_CMD_STATUS_INVALID_STATE] = -ENODEV, }; if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map))) return -EINVAL; return mc_status_to_error_map[status]; } static const char *mc_status_to_string(enum mc_cmd_status status) { static const char *const status_strings[] = { [MC_CMD_STATUS_OK] = "Command completed successfully", [MC_CMD_STATUS_READY] = "Command ready to be processed", [MC_CMD_STATUS_AUTH_ERR] = "Authentication error", [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege", [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error", [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error", [MC_CMD_STATUS_TIMEOUT] = "Operation timed out", [MC_CMD_STATUS_NO_RESOURCE] = "No resources", [MC_CMD_STATUS_NO_MEMORY] = "No memory available", [MC_CMD_STATUS_BUSY] = "Device is busy", [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation", [MC_CMD_STATUS_INVALID_STATE] = "Invalid state" }; if ((unsigned 
int)status >= ARRAY_SIZE(status_strings)) return "Unknown MC error"; return status_strings[status]; } /** * mc_write_command - writes a command to a Management Complex (MC) portal * * @portal: pointer to an MC portal * @cmd: pointer to a filled command */ static inline void mc_write_command(struct mc_command __iomem *portal, struct mc_command *cmd) { int i; /* copy command parameters into the portal */ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) writeq(cmd->params[i], &portal->params[i]); /* submit the command by writing the header */ writeq(cmd->header, &portal->header); } /** * mc_read_response - reads the response for the last MC command from a * Management Complex (MC) portal * * @portal: pointer to an MC portal * @resp: pointer to command response buffer * * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. */ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * portal, struct mc_command *resp) { int i; enum mc_cmd_status status; /* Copy command response header from MC portal: */ resp->header = readq(&portal->header); status = MC_CMD_HDR_READ_STATUS(resp->header); if (status != MC_CMD_STATUS_OK) return status; /* Copy command response data from MC portal: */ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) resp->params[i] = readq(&portal->params[i]); return status; } /** * Waits for the completion of an MC command doing preemptible polling. * uslepp_range() is called between polling iterations. 
* * @mc_io: MC I/O object to be used * @cmd: command buffer to receive MC response * @mc_status: MC command completion status */ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, struct mc_command *cmd, enum mc_cmd_status *mc_status) { enum mc_cmd_status status; unsigned long jiffies_until_timeout = jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); /* * Wait for response from the MC hardware: */ for (;;) { status = mc_read_response(mc_io->portal_virt_addr, cmd); if (status != MC_CMD_STATUS_READY) break; /* * TODO: When MC command completion interrupts are supported * call wait function here instead of usleep_range() */ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); if (time_after_eq(jiffies, jiffies_until_timeout)) { pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int) MC_CMD_HDR_READ_TOKEN(cmd->header), (unsigned int) MC_CMD_HDR_READ_CMDID(cmd->header)); return -ETIMEDOUT; } } *mc_status = status; return 0; } /** * Waits for the completion of an MC command doing atomic polling. * udelay() is called between polling iterations. 
* * @mc_io: MC I/O object to be used * @cmd: command buffer to receive MC response * @mc_status: MC command completion status */ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, struct mc_command *cmd, enum mc_cmd_status *mc_status) { enum mc_cmd_status status; unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000; BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) % MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0); for (;;) { status = mc_read_response(mc_io->portal_virt_addr, cmd); if (status != MC_CMD_STATUS_READY) break; udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; if (timeout_usecs == 0) { pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int) MC_CMD_HDR_READ_TOKEN(cmd->header), (unsigned int) MC_CMD_HDR_READ_CMDID(cmd->header)); return -ETIMEDOUT; } } *mc_status = status; return 0; } /** * Sends a command to the MC device using the given MC I/O object * * @mc_io: MC I/O object to be used * @cmd: command to be sent * * Returns '0' on Success; Error code otherwise. 
*/ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) { int error; enum mc_cmd_status status; unsigned long irq_flags = 0; if (WARN_ON(in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))) return -EINVAL; if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) spin_lock_irqsave(&mc_io->spinlock, irq_flags); else mutex_lock(&mc_io->mutex); /* * Send command to the MC hardware: */ mc_write_command(mc_io->portal_virt_addr, cmd); /* * Wait for response from the MC hardware: */ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) error = mc_polling_wait_preemptible(mc_io, cmd, &status); else error = mc_polling_wait_atomic(mc_io, cmd, &status); if (error < 0) goto common_exit; if (status != MC_CMD_STATUS_OK) { pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", mc_io->portal_phys_addr, (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), mc_status_to_string(status), (unsigned int)status); error = mc_status_to_error(status); goto common_exit; } error = 0; common_exit: if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); else mutex_unlock(&mc_io->mutex); return error; } EXPORT_SYMBOL(mc_send_command);
gpl-2.0
Potin/linux-am33x-04.06.00.10
drivers/staging/gma500/mdfld_output.c
195
4795
/* * Copyright (c) 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicensen * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Thomas Eaton <thomas.g.eaton@intel.com> * Scott Rowe <scott.m.rowe@intel.com> */ #include <linux/init.h> #include <linux/moduleparam.h> #include "mdfld_dsi_dbi.h" #include "mdfld_dsi_dpi.h" #include "mdfld_dsi_output.h" #include "mdfld_output.h" #include "mdfld_dsi_dbi_dpu.h" #include "displays/tpo_cmd.h" #include "displays/tpo_vid.h" #include "displays/tmd_cmd.h" #include "displays/tmd_vid.h" #include "displays/pyr_cmd.h" #include "displays/pyr_vid.h" /* #include "displays/hdmi.h" */ static int mdfld_dual_mipi; static int mdfld_hdmi; static int mdfld_dpu; module_param(mdfld_dual_mipi, int, 0600); MODULE_PARM_DESC(mdfld_dual_mipi, "Enable dual MIPI configuration"); module_param(mdfld_hdmi, int, 0600); MODULE_PARM_DESC(mdfld_hdmi, "Enable Medfield HDMI"); module_param(mdfld_dpu, int, 0600); MODULE_PARM_DESC(mdfld_dpu, "Enable Medfield DPU"); /* For now a single type per device is all we cope with */ int mdfld_get_panel_type(struct drm_device *dev, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; return dev_priv->panel_id; } int mdfld_panel_dpi(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; switch (dev_priv->panel_id) { case TMD_VID: case TPO_VID: case PYR_VID: return true; case TMD_CMD: case TPO_CMD: case PYR_CMD: default: return false; } } static int init_panel(struct drm_device *dev, int mipi_pipe, int p_type) { struct panel_funcs *p_cmd_funcs; struct panel_funcs *p_vid_funcs; /* Oh boy ... 
FIXME */ p_cmd_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL); if (p_cmd_funcs == NULL) return -ENODEV; p_vid_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL); if (p_vid_funcs == NULL) { kfree(p_cmd_funcs); return -ENODEV; } switch (p_type) { case TPO_CMD: tpo_cmd_init(dev, p_cmd_funcs); mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL); break; case TPO_VID: tpo_vid_init(dev, p_vid_funcs); mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs); break; case TMD_CMD: /*tmd_cmd_init(dev, p_cmd_funcs); */ mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL); break; case TMD_VID: tmd_vid_init(dev, p_vid_funcs); mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs); break; case PYR_CMD: pyr_cmd_init(dev, p_cmd_funcs); mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL); break; case PYR_VID: mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs); break; case TPO: /* TPO panel supports both cmd & vid interfaces */ tpo_cmd_init(dev, p_cmd_funcs); tpo_vid_init(dev, p_vid_funcs); mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, p_vid_funcs); break; case TMD: break; case PYR: break; #if 0 case HDMI: dev_dbg(dev->dev, "Initializing HDMI"); mdfld_hdmi_init(dev, &dev_priv->mode_dev); break; #endif default: dev_err(dev->dev, "Unsupported interface %d", p_type); return -ENODEV; } return 0; } int mdfld_output_init(struct drm_device *dev) { int type; /* MIPI panel 1 */ type = mdfld_get_panel_type(dev, 0); dev_info(dev->dev, "panel 1: type is %d\n", type); init_panel(dev, 0, type); if (mdfld_dual_mipi) { /* MIPI panel 2 */ type = mdfld_get_panel_type(dev, 2); dev_info(dev->dev, "panel 2: type is %d\n", type); init_panel(dev, 2, type); } if (mdfld_hdmi) /* HDMI panel */ init_panel(dev, 0, HDMI); return 0; } void mdfld_output_setup(struct drm_device *dev) { /* FIXME: this is not the right place for this stuff ! */ if (IS_MFLD(dev)) { if (mdfld_dpu) mdfld_dbi_dpu_init(dev); else mdfld_dbi_dsr_init(dev); } }
gpl-2.0
yakir-Yang/linux
drivers/scsi/atp870u.c
451
60131
/* * Copyright (C) 1997 Wu Ching Chen * 2.1.x update (C) 1998 Krzysztof G. Baranowski * 2.5.x update (C) 2002 Red Hat * 2.6.x update (C) 2004 Red Hat * * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes * * Wu Ching Chen : NULL pointer fixes 2000/06/02 * support atp876 chip * enable 32 bit fifo transfer * support cdrom & remove device run ultra speed * fix disconnect bug 2000/12/21 * support atp880 chip lvd u160 2001/05/15 * fix prd table bug 2001/09/12 (7.1) * * atp885 support add by ACARD Hao Ping Lian 2005/01/05 */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "atp870u.h" static struct scsi_host_template atp870u_template; static void send_s870(struct atp_unit *dev,unsigned char c); static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode); static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val) { outb(val, atp->baseport + reg); } static inline void atp_writew_base(struct atp_unit *atp, u8 reg, u16 val) { outw(val, atp->baseport + reg); } static inline void atp_writeb_io(struct atp_unit *atp, u8 channel, u8 reg, u8 val) { outb(val, atp->ioport[channel] + reg); } static inline void atp_writew_io(struct atp_unit *atp, u8 channel, u8 reg, u16 val) { outw(val, atp->ioport[channel] + reg); } static inline void atp_writeb_pci(struct atp_unit *atp, u8 channel, u8 reg, u8 val) { outb(val, atp->pciport[channel] + reg); } static inline void atp_writel_pci(struct atp_unit *atp, u8 channel, u8 reg, u32 val) { outl(val, atp->pciport[channel] + reg); } static inline 
u8 atp_readb_base(struct atp_unit *atp, u8 reg) { return inb(atp->baseport + reg); } static inline u16 atp_readw_base(struct atp_unit *atp, u8 reg) { return inw(atp->baseport + reg); } static inline u32 atp_readl_base(struct atp_unit *atp, u8 reg) { return inl(atp->baseport + reg); } static inline u8 atp_readb_io(struct atp_unit *atp, u8 channel, u8 reg) { return inb(atp->ioport[channel] + reg); } static inline u16 atp_readw_io(struct atp_unit *atp, u8 channel, u8 reg) { return inw(atp->ioport[channel] + reg); } static inline u8 atp_readb_pci(struct atp_unit *atp, u8 channel, u8 reg) { return inb(atp->pciport[channel] + reg); } static inline bool is880(struct atp_unit *atp) { return atp->pdev->device == ATP880_DEVID1 || atp->pdev->device == ATP880_DEVID2; } static inline bool is885(struct atp_unit *atp) { return atp->pdev->device == ATP885_DEVID; } static irqreturn_t atp870u_intr_handle(int irq, void *dev_id) { unsigned long flags; unsigned short int id; unsigned char i, j, c, target_id, lun,cmdp; unsigned char *prd; struct scsi_cmnd *workreq; unsigned long adrcnt, k; #ifdef ED_DBGP unsigned long l; #endif struct Scsi_Host *host = dev_id; struct atp_unit *dev = (struct atp_unit *)&host->hostdata; for (c = 0; c < 2; c++) { j = atp_readb_io(dev, c, 0x1f); if ((j & 0x80) != 0) break; dev->in_int[c] = 0; } if ((j & 0x80) == 0) return IRQ_NONE; #ifdef ED_DBGP printk("atp870u_intr_handle enter\n"); #endif dev->in_int[c] = 1; cmdp = atp_readb_io(dev, c, 0x10); if (dev->working[c] != 0) { if (is885(dev)) { if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0) atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80)); } if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0) { for (k=0; k < 1000; k++) { if ((atp_readb_pci(dev, c, 2) & 0x08) == 0) break; if ((atp_readb_pci(dev, c, 2) & 0x01) == 0) break; } } atp_writeb_pci(dev, c, 0, 0x00); i = atp_readb_io(dev, c, 0x17); if (is885(dev)) atp_writeb_pci(dev, c, 2, 0x06); target_id = atp_readb_io(dev, c, 0x15); /* * Remap wide devices 
onto id numbers */ if ((target_id & 0x40) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if ((j & 0x40) != 0) { if (dev->last_cmd[c] == 0xff) { dev->last_cmd[c] = target_id; } dev->last_cmd[c] |= 0x40; } if (is885(dev)) dev->r1f[c][target_id] |= j; #ifdef ED_DBGP printk("atp870u_intr_handle status = %x\n",i); #endif if (i == 0x85) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (is885(dev)) { adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); if (dev->id[c][target_id].last_len != adrcnt) { k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; } #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len); #endif } /* * Flip wide */ if (dev->wide_id[c] != 0) { atp_writeb_io(dev, c, 0x1b, 0x01); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) atp_writeb_io(dev, c, 0x1b, 0x01); } /* * Issue more commands */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Done */ dev->in_int[c] = 0; #ifdef ED_DBGP printk("Status 0x85 return\n"); #endif return IRQ_HANDLED; } if (i == 0x40) { dev->last_cmd[c] |= 0x40; dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x21) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = dev->id[c][target_id].last_len; k -= adrcnt; 
dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; atp_writeb_io(dev, c, 0x10, 0x41); atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } if (is885(dev)) { if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) { if ((i == 0x4c) || (i == 0x8c)) i=0x48; else i=0x49; } } if ((i == 0x80) || (i == 0x8f)) { #ifdef ED_DBGP printk(KERN_DEBUG "Device reselect\n"); #endif lun = 0; if (cmdp == 0x44 || i == 0x80) lun = atp_readb_io(dev, c, 0x1d) & 0x07; else { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (cmdp == 0x41) { #ifdef ED_DBGP printk("cmdp = 0x41\n"); #endif adrcnt = 0; ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); k = dev->id[c][target_id].last_len; k -= adrcnt; dev->id[c][target_id].tran_len = k; dev->id[c][target_id].last_len = adrcnt; atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } else { #ifdef ED_DBGP printk("cmdp != 0x41\n"); #endif atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; atp_writeb_io(dev, c, 0x12, 0x00); atp_writeb_io(dev, c, 0x13, 0x00); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; return IRQ_HANDLED; } } if (dev->last_cmd[c] != 0xff) { dev->last_cmd[c] |= 0x40; } if (is885(dev)) { j = atp_readb_base(dev, 0x29) & 0xfe; atp_writeb_base(dev, 0x29, j); } else atp_writeb_io(dev, c, 0x10, 0x45); target_id = atp_readb_io(dev, c, 0x16); /* * Remap wide identifiers */ if ((target_id & 0x10) != 0) { target_id = (target_id & 0x07) | 0x08; } else { target_id &= 0x07; } if (is885(dev)) atp_writeb_io(dev, c, 0x10, 0x45); workreq = dev->id[c][target_id].curr_req; #ifdef ED_DBGP scmd_printk(KERN_DEBUG, workreq, "CDB"); for (l = 0; l < workreq->cmd_len; l++) printk(KERN_DEBUG " %x",workreq->cmnd[l]); printk("\n"); #endif atp_writeb_io(dev, c, 0x0f, 
lun); atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); adrcnt = dev->id[c][target_id].tran_len; k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]); #ifdef ED_DBGP printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12)); #endif /* Remap wide */ j = target_id; if (target_id > 7) { j = (j & 0x07) | 0x40; } /* Add direction */ j |= dev->id[c][target_id].dirct; atp_writeb_io(dev, c, 0x15, j); atp_writeb_io(dev, c, 0x16, 0x80); /* enable 32 bit fifo transfer */ if (is885(dev)) { i = atp_readb_pci(dev, c, 1) & 0xf3; //j=workreq->cmnd[0]; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { i |= 0x0c; } atp_writeb_pci(dev, c, 1, i); } else if (is880(dev)) { if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); else atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); else atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } j = 0; id = 1; id = id << target_id; /* * Is this a wide device */ if ((id & dev->wide_id[c]) != 0) { j |= 0x01; } atp_writeb_io(dev, c, 0x1b, j); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) atp_writeb_io(dev, c, 0x1b, j); if (dev->id[c][target_id].last_len == 0) { atp_writeb_io(dev, c, 0x18, 0x08); dev->in_int[c] = 0; #ifdef ED_DBGP printk("dev->id[c][target_id].last_len = 0\n"); #endif return IRQ_HANDLED; } #ifdef ED_DBGP printk("target_id = %d adrcnt = %d\n",target_id,adrcnt); #endif 
prd = dev->id[c][target_id].prd_pos; while (adrcnt != 0) { id = ((unsigned short int *)prd)[2]; if (id == 0) { k = 0x10000; } else { k = id; } if (k > adrcnt) { ((unsigned short int *)prd)[2] = (unsigned short int) (k - adrcnt); ((unsigned long *)prd)[0] += adrcnt; adrcnt = 0; dev->id[c][target_id].prd_pos = prd; } else { adrcnt -= k; dev->id[c][target_id].prdaddr += 0x08; prd += 0x08; if (adrcnt == 0) { dev->id[c][target_id].prd_pos = prd; } } } atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr); #ifdef ED_DBGP printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr); #endif if (!is885(dev)) { atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); } /* * Check transfer direction */ if (dev->id[c][target_id].dirct != 0) { atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct != 0\n"); #endif return IRQ_HANDLED; } atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; #ifdef ED_DBGP printk("status 0x80 return dirct = 0\n"); #endif return IRQ_HANDLED; } /* * Current scsi request on this target */ workreq = dev->id[c][target_id].curr_req; if (i == 0x42 || i == 0x16) { if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (i == 0x16) { workreq->result = atp_readb_io(dev, c, 0x0f); if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) { printk(KERN_WARNING "AEC67162 CRC ERROR !\n"); workreq->result = 0x02; } } else workreq->result = 0x02; if (is885(dev)) { j = atp_readb_base(dev, 0x29) | 0x01; atp_writeb_base(dev, 0x29, j); } /* * Complete the command */ scsi_dma_unmap(workreq); spin_lock_irqsave(dev->host->host_lock, flags); (*workreq->scsi_done) (workreq); #ifdef ED_DBGP printk("workreq->scsi_done\n"); #endif /* * Clear it off the queue */ dev->id[c][target_id].curr_req = NULL; dev->working[c]--; spin_unlock_irqrestore(dev->host->host_lock, flags); /* * Take it back wide */ if 
(dev->wide_id[c] != 0) { atp_writeb_io(dev, c, 0x1b, 0x01); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) atp_writeb_io(dev, c, 0x1b, 0x01); } /* * If there is stuff to send and nothing going then send it */ spin_lock_irqsave(dev->host->host_lock, flags); if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(scsi_done)\n"); #endif send_s870(dev,c); } spin_unlock_irqrestore(dev->host->host_lock, flags); dev->in_int[c] = 0; return IRQ_HANDLED; } if ((dev->last_cmd[c] & 0xf0) != 0x40) { dev->last_cmd[c] = 0xff; } if (i == 0x4f) { i = 0x89; } i &= 0x0f; if (i == 0x09) { atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); atp_writeb_io(dev, c, 0x10, 0x41); if (is885(dev)) { k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); dev->id[c][target_id].dirct = 0x00; } else { dev->id[c][target_id].dirct = 0x00; } atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x09); dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x08) { atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); atp_writeb_io(dev, c, 0x10, 0x41); if (is885(dev)) { k = dev->id[c][target_id].last_len; atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); } atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20); dev->id[c][target_id].dirct = 0x20; atp_writeb_io(dev, c, 0x18, 0x08); atp_writeb_pci(dev, c, 0, 0x01); dev->in_int[c] = 0; return IRQ_HANDLED; } if (i == 0x0a) atp_writeb_io(dev, c, 0x10, 0x30); else atp_writeb_io(dev, c, 0x10, 0x46); dev->id[c][target_id].dirct = 0x00; atp_writeb_io(dev, 
c, 0x12, 0x00); atp_writeb_io(dev, c, 0x13, 0x00); atp_writeb_io(dev, c, 0x14, 0x00); atp_writeb_io(dev, c, 0x18, 0x08); } dev->in_int[c] = 0; return IRQ_HANDLED; } /** * atp870u_queuecommand - Queue SCSI command * @req_p: request block * @done: completion function * * Queue a command to the ATP queue. Called with the host lock held. */ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, void (*done) (struct scsi_cmnd *)) { unsigned char c; unsigned int m; struct atp_unit *dev; struct Scsi_Host *host; c = scmd_channel(req_p); req_p->sense_buffer[0]=0; scsi_set_resid(req_p, 0); if (scmd_channel(req_p) > 1) { req_p->result = 0x00040000; done(req_p); #ifdef ED_DBGP printk("atp870u_queuecommand : req_p->device->channel > 1\n"); #endif return 0; } host = req_p->device->host; dev = (struct atp_unit *)&host->hostdata; m = 1; m = m << scmd_id(req_p); /* * Fake a timeout for missing targets */ if ((m & dev->active_id[c]) == 0) { req_p->result = 0x00040000; done(req_p); return 0; } if (done) { req_p->scsi_done = done; } else { #ifdef ED_DBGP printk( "atp870u_queuecommand: done can't be NULL\n"); #endif req_p->result = 0; done(req_p); return 0; } /* * Count new command */ dev->quend[c]++; if (dev->quend[c] >= qcnt) { dev->quend[c] = 0; } /* * Check queue state */ if (dev->quhd[c] == dev->quend[c]) { if (dev->quend[c] == 0) { dev->quend[c] = qcnt; } #ifdef ED_DBGP printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n"); #endif dev->quend[c]--; req_p->result = 0x00020000; done(req_p); return 0; } dev->quereq[c][dev->quend[c]] = req_p; #ifdef ED_DBGP printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],atp_readb_io(dev, c, 0x1c),c,dev->in_int[c],c,dev->in_snd[c]); #endif if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { #ifdef ED_DBGP printk("Call sent_s870(atp870u_queuecommand)\n"); #endif send_s870(dev,c); } #ifdef ED_DBGP printk("atp870u_queuecommand : 
exit\n"); #endif return 0; } static DEF_SCSI_QCMD(atp870u_queuecommand) /** * send_s870 - send a command to the controller * @host: host * * On entry there is work queued to be done. We move some of that work to the * controller itself. * * Caller holds the host lock. */ static void send_s870(struct atp_unit *dev,unsigned char c) { struct scsi_cmnd *workreq = NULL; unsigned int i;//,k; unsigned char j, target_id; unsigned char *prd; unsigned short int w; unsigned long l, bttl = 0; unsigned long sg_count; if (dev->in_snd[c] != 0) { #ifdef ED_DBGP printk("cmnd in_snd\n"); #endif return; } #ifdef ED_DBGP printk("Sent_s870 enter\n"); #endif dev->in_snd[c] = 1; if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { dev->last_cmd[c] &= 0x0f; workreq = dev->id[c][dev->last_cmd[c]].curr_req; if (!workreq) { dev->last_cmd[c] = 0xff; if (dev->quhd[c] == dev->quend[c]) { dev->in_snd[c] = 0; return; } } } if (!workreq) { if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) { dev->in_snd[c] = 0; return; } dev->working[c]++; j = dev->quhd[c]; dev->quhd[c]++; if (dev->quhd[c] >= qcnt) dev->quhd[c] = 0; workreq = dev->quereq[c][dev->quhd[c]]; if (dev->id[c][scmd_id(workreq)].curr_req != NULL) { dev->quhd[c] = j; dev->working[c]--; dev->in_snd[c] = 0; return; } dev->id[c][scmd_id(workreq)].curr_req = workreq; dev->last_cmd[c] = scmd_id(workreq); } if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) { #ifdef ED_DBGP printk("Abort to Send\n"); #endif dev->last_cmd[c] |= 0x40; dev->in_snd[c] = 0; return; } #ifdef ED_DBGP printk("OK to Send\n"); scmd_printk(KERN_DEBUG, workreq, "CDB"); for(i=0;i<workreq->cmd_len;i++) { printk(" %x",workreq->cmnd[i]); } printk("\n"); #endif l = scsi_bufflen(workreq); if (is885(dev)) { j = atp_readb_base(dev, 0x29) & 0xfe; atp_writeb_base(dev, 0x29, j); dev->r1f[c][scmd_id(workreq)] = 0; } if (workreq->cmnd[0] == READ_CAPACITY) { if (l > 8) l = 8; } if (workreq->cmnd[0] == 0x00) { l = 0; } j = 0; target_id 
= scmd_id(workreq); /* * Wide ? */ w = 1; w = w << target_id; if ((w & dev->wide_id[c]) != 0) { j |= 0x01; } atp_writeb_io(dev, c, 0x1b, j); while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) { atp_writeb_pci(dev, c, 0x1b, j); #ifdef ED_DBGP printk("send_s870 while loop 1\n"); #endif } /* * Write the command */ atp_writeb_io(dev, c, 0x00, workreq->cmd_len); atp_writeb_io(dev, c, 0x01, 0x2c); if (is885(dev)) atp_writeb_io(dev, c, 0x02, 0x7f); else atp_writeb_io(dev, c, 0x02, 0xcf); for (i = 0; i < workreq->cmd_len; i++) atp_writeb_io(dev, c, 0x03 + i, workreq->cmnd[i]); atp_writeb_io(dev, c, 0x0f, workreq->device->lun); /* * Write the target */ atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); #ifdef ED_DBGP printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp); #endif sg_count = scsi_dma_map(workreq); /* * Write transfer size */ atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); j = target_id; dev->id[c][j].last_len = l; dev->id[c][j].tran_len = 0; #ifdef ED_DBGP printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len); #endif /* * Flip the wide bits */ if ((j & 0x08) != 0) { j = (j & 0x07) | 0x40; } /* * Check transfer direction */ if (workreq->sc_data_direction == DMA_TO_DEVICE) atp_writeb_io(dev, c, 0x15, j | 0x20); else atp_writeb_io(dev, c, 0x15, j); atp_writeb_io(dev, c, 0x16, atp_readb_io(dev, c, 0x16) | 0x80); atp_writeb_io(dev, c, 0x16, 0x80); dev->id[c][target_id].dirct = 0; if (l == 0) { if (atp_readb_io(dev, c, 0x1c) == 0) { #ifdef ED_DBGP printk("change SCSI_CMD_REG 0x08\n"); #endif atp_writeb_io(dev, c, 0x18, 0x08); } else dev->last_cmd[c] |= 0x40; dev->in_snd[c] = 0; return; } prd = dev->id[c][target_id].prd_table; dev->id[c][target_id].prd_pos = prd; /* * Now write the request list. Either as scatter/gather or as * a linear chain. 
*/ if (l) { struct scatterlist *sgpnt; i = 0; scsi_for_each_sg(workreq, sgpnt, sg_count, j) { bttl = sg_dma_address(sgpnt); l=sg_dma_len(sgpnt); #ifdef ED_DBGP printk("1. bttl %x, l %x\n",bttl, l); #endif while (l > 0x10000) { (((u16 *) (prd))[i + 3]) = 0x0000; (((u16 *) (prd))[i + 2]) = 0x0000; (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); l -= 0x10000; bttl += 0x10000; i += 0x04; } (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl); (((u16 *) (prd))[i + 2]) = cpu_to_le16(l); (((u16 *) (prd))[i + 3]) = 0; i += 0x04; } (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000); #ifdef ED_DBGP printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3])); printk("2. bttl %x, l %x\n",bttl, l); #endif } #ifdef ED_DBGP printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id); #endif dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); atp_writeb_pci(dev, c, 2, 0x06); atp_writeb_pci(dev, c, 2, 0x00); if (is885(dev)) { j = atp_readb_pci(dev, c, 1) & 0xf3; if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) { j |= 0x0c; } atp_writeb_pci(dev, c, 1, j); } else if (is880(dev)) { if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0); else atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f); } else { if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08); else atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3); } if(workreq->sc_data_direction == DMA_TO_DEVICE) { dev->id[c][target_id].dirct = 0x20; if (atp_readb_io(dev, c, 0x1c) == 0) 
	{
		/* DMA engine idle: start the write (to-target) transfer now */
		atp_writeb_io(dev, c, 0x18, 0x08);
		atp_writeb_pci(dev, c, 0, 0x01);
#ifdef ED_DBGP
		printk( "start DMA(to target)\n");
#endif
		} else {
			/* Chip busy: defer; interrupt path will re-issue via send_s870() */
			dev->last_cmd[c] |= 0x40;
		}
		dev->in_snd[c] = 0;
		return;
	}
	/* Read (to-host) direction: same pattern, different DMA command byte */
	if (atp_readb_io(dev, c, 0x1c) == 0) {
		atp_writeb_io(dev, c, 0x18, 0x08);
		atp_writeb_pci(dev, c, 0, 0x09);
#ifdef ED_DBGP
		printk( "start DMA(to host)\n");
#endif
	} else {
		dev->last_cmd[c] |= 0x40;
	}
	dev->in_snd[c] = 0;
	return;
}

/*
 * fun_scam - clock one SCAM handshake step on channel 0.
 * @dev: adapter state
 * @val: caller-maintained image of the bus control/data word written to
 *       I/O register 0x1c; updated in place as data-bus bits are
 *       asserted and released
 *
 * Toggles DB5/DB6/DB7 through register 0x1c, polling after each change
 * until the other bus devices release the corresponding line (the loop
 * index is reset to 0 while a line is still asserted, extending the
 * settle wait).  Returns the high byte of the value read from register
 * 0x1c during the initial settle poll — the data quintet sampled from
 * the bus.  NOTE(review): bit meanings taken from the inline comments
 * below; confirm against the ATP chip documentation.
 */
static unsigned char fun_scam(struct atp_unit *dev, unsigned short int *val)
{
	unsigned short int i, k;
	unsigned char j;

	atp_writew_io(dev, 0, 0x1c, *val);
	for (i = 0; i < 10; i++) {	/* stable >= bus settle delay(400 ns)  */
		k = atp_readw_io(dev, 0, 0x1c);
		j = (unsigned char) (k >> 8);
		if ((k & 0x8000) != 0)	/* DB7 all release?    */
			i = 0;
	}
	*val |= 0x4000;		/* assert DB6           */
	atp_writew_io(dev, 0, 0x1c, *val);
	*val &= 0xdfff;		/* assert DB5           */
	atp_writew_io(dev, 0, 0x1c, *val);
	for (i = 0; i < 10; i++) {	/* stable >= bus settle delay(400 ns)  */
		if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) != 0)	/* DB5 all release?       */
			i = 0;
	}
	*val |= 0x8000;		/* no DB4-0, assert DB7    */
	*val &= 0xe0ff;
	atp_writew_io(dev, 0, 0x1c, *val);
	*val &= 0xbfff;		/* release DB6             */
	atp_writew_io(dev, 0, 0x1c, *val);
	for (i = 0; i < 10; i++) {	/* stable >= bus settle delay(400 ns)  */
		if ((atp_readw_io(dev, 0, 0x1c) & 0x4000) != 0)	/* DB6 all release? */
			i = 0;
	}
	return j;
}

/*
 * tscam - run the SCAM (SCSI Configured AutoMagically) ID-assignment
 * protocol on channel 0 at initialisation time.  First selects every
 * candidate target ID to build a map of already-claimed IDs, then
 * arbitrates on the bus and drives the SCAM isolation/assignment
 * handshake via fun_scam() for devices requesting an ID.  Skipped
 * entirely unless bit 0x40 of @scam_on is set.
 */
static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on)
{
	unsigned char i, j, k;
	unsigned long n;
	unsigned short int m, assignid_map, val;
	unsigned char mbuf[33], quintet[2];
	struct atp_unit *dev = (struct atp_unit *)&host->hostdata;
	static unsigned char g2q_tab[8] = {
		0x38, 0x31, 0x32, 0x2b, 0x34, 0x2d, 0x2e, 0x27
	};

/*  I can't believe we need this before we've even done anything.  Remove it
 *  and see if anyone bitches.
for (i = 0; i < 0x10; i++) { udelay(0xffff); } */ atp_writeb_io(dev, 0, 1, 0x08); atp_writeb_io(dev, 0, 2, 0x7f); atp_writeb_io(dev, 0, 0x11, 0x20); if ((scam_on & 0x40) == 0) { return; } m = 1; m <<= dev->host_id[0]; j = 16; if (!wide_chip) { m |= 0xff00; j = 8; } assignid_map = m; atp_writeb_io(dev, 0, 0x02, 0x02); /* 2*2=4ms,3EH 2/32*3E=3.9ms */ atp_writeb_io(dev, 0, 0x03, 0); atp_writeb_io(dev, 0, 0x04, 0); atp_writeb_io(dev, 0, 0x05, 0); atp_writeb_io(dev, 0, 0x06, 0); atp_writeb_io(dev, 0, 0x07, 0); atp_writeb_io(dev, 0, 0x08, 0); for (i = 0; i < j; i++) { m = 1; m = m << i; if ((m & assignid_map) != 0) { continue; } atp_writeb_io(dev, 0, 0x0f, 0); atp_writeb_io(dev, 0, 0x12, 0); atp_writeb_io(dev, 0, 0x13, 0); atp_writeb_io(dev, 0, 0x14, 0); if (i > 7) { k = (i & 0x07) | 0x40; } else { k = i; } atp_writeb_io(dev, 0, 0x15, k); if (wide_chip) atp_writeb_io(dev, 0, 0x1b, 0x01); else atp_writeb_io(dev, 0, 0x1b, 0x00); do { atp_writeb_io(dev, 0, 0x18, 0x09); while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0x00) cpu_relax(); k = atp_readb_io(dev, 0, 0x17); if ((k == 0x85) || (k == 0x42)) break; if (k != 0x16) atp_writeb_io(dev, 0, 0x10, 0x41); } while (k != 0x16); if ((k == 0x85) || (k == 0x42)) continue; assignid_map |= m; } atp_writeb_io(dev, 0, 0x02, 0x7f); atp_writeb_io(dev, 0, 0x1b, 0x02); udelay(2); val = 0x0080; /* bsy */ atp_writew_io(dev, 0, 0x1c, val); val |= 0x0040; /* sel */ atp_writew_io(dev, 0, 0x1c, val); val |= 0x0004; /* msg */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); /* 2 deskew delay(45ns*2=90ns) */ val &= 0x007f; /* no bsy */ atp_writew_io(dev, 0, 0x1c, val); mdelay(128); val &= 0x00fb; /* after 1ms no msg */ atp_writew_io(dev, 0, 0x1c, val); while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0) ; udelay(2); udelay(100); for (n = 0; n < 0x30000; n++) if ((atp_readb_io(dev, 0, 0x1c) & 0x80) != 0) /* bsy ? 
*/ break; if (n < 0x30000) for (n = 0; n < 0x30000; n++) if ((atp_readb_io(dev, 0, 0x1c) & 0x81) == 0x0081) { udelay(2); val |= 0x8003; /* io,cd,db7 */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); val &= 0x00bf; /* no sel */ atp_writew_io(dev, 0, 0x1c, val); udelay(2); break; } while (1) { /* * The funny division into multiple delays is to accomodate * arches like ARM where udelay() multiplies its argument by * a large number to initialize a loop counter. To avoid * overflow, the maximum supported udelay is 2000 microseconds. * * XXX it would be more polite to find a way to use msleep() */ mdelay(2); udelay(48); if ((atp_readb_io(dev, 0, 0x1c) & 0x80) == 0x00) { /* bsy ? */ atp_writew_io(dev, 0, 0x1c, 0); atp_writeb_io(dev, 0, 0x1b, 0); atp_writeb_io(dev, 0, 0x15, 0); atp_writeb_io(dev, 0, 0x18, 0x09); while ((atp_readb_io(dev, 0, 0x1f) & 0x80) == 0) cpu_relax(); atp_readb_io(dev, 0, 0x17); return; } val &= 0x00ff; /* synchronization */ val |= 0x3f00; fun_scam(dev, &val); udelay(2); val &= 0x00ff; /* isolation */ val |= 0x2000; fun_scam(dev, &val); udelay(2); i = 8; j = 0; while (1) { if ((atp_readw_io(dev, 0, 0x1c) & 0x2000) == 0) continue; udelay(2); val &= 0x00ff; /* get ID_STRING */ val |= 0x2000; k = fun_scam(dev, &val); if ((k & 0x03) == 0) break; mbuf[j] <<= 0x01; mbuf[j] &= 0xfe; if ((k & 0x02) != 0) mbuf[j] |= 0x01; i--; if (i > 0) continue; j++; i = 8; } /* isolation complete.. */ /* mbuf[32]=0; printk(" \n%x %x %x %s\n ",assignid_map,mbuf[0],mbuf[1],&mbuf[2]); */ i = 15; j = mbuf[0]; if ((j & 0x20) != 0) { /* bit5=1:ID up to 7 */ i = 7; } if ((j & 0x06) != 0) { /* IDvalid? 
 */
		/* Try the device's own requested ID first, counting down */
		k = mbuf[1];
		while (1) {
			m = 1;
			m <<= k;
			if ((m & assignid_map) == 0)
				break;
			if (k > 0)
				k--;
			else
				break;
		}
	}
	if ((m & assignid_map) != 0) {	/* srch from max acceptable ID#  */
		k = i;	/* max acceptable ID#            */
		while (1) {
			m = 1;
			m <<= k;
			if ((m & assignid_map) == 0)
				break;
			if (k > 0)
				k--;
			else
				break;
		}
	}
	/* k=binID#,       */
	assignid_map |= m;
	if (k < 8) {
		quintet[0] = 0x38;	/* 1st dft ID<8    */
	} else {
		quintet[0] = 0x31;	/* 1st  ID>=8      */
	}
	k &= 0x07;
	quintet[1] = g2q_tab[k];

	val &= 0x00ff;		/* AssignID 1stQuintet,AH=001xxxxx  */
	m = quintet[0] << 8;
	val |= m;
	fun_scam(dev, &val);
	val &= 0x00ff;		/* AssignID 2ndQuintet,AH=001xxxxx  */
	m = quintet[1] << 8;
	val |= m;
	fun_scam(dev, &val);

	}
}

/*
 * atp870u_free_tables - release the per-target PRD (physical region
 * descriptor) DMA buffers allocated by atp870u_init_tables().
 *
 * Safe to call on a partially initialised host: entries whose
 * prd_table pointer is NULL are skipped, and freed entries are reset
 * to NULL so a second call is harmless.
 */
static void atp870u_free_tables(struct Scsi_Host *host)
{
	struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
	int j, k;

	for (j=0; j < 2; j++) {
		for (k = 0; k < 16; k++) {
			if (!atp_dev->id[j][k].prd_table)
				continue;
			pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
			atp_dev->id[j][k].prd_table = NULL;
		}
	}
}

/*
 * atp870u_init_tables - allocate one 1 KiB coherent PRD table for each
 * of the 2 channels x 16 targets and reset all per-channel queue
 * bookkeeping (command queue head/tail, last_cmd, in_snd/in_int flags,
 * per-target device state).
 *
 * Returns 0 on success, or -ENOMEM after rolling back any tables that
 * were already allocated (via atp870u_free_tables()).
 */
static int atp870u_init_tables(struct Scsi_Host *host)
{
	struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
	int c,k;
	for(c=0;c < 2;c++) {
		for(k=0;k<16;k++) {
			atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
			if (!atp_dev->id[c][k].prd_table) {
				printk("atp870u_init_tables fail\n");
				atp870u_free_tables(host);
				return -ENOMEM;
			}
			atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
			atp_dev->id[c][k].devsp=0x20;	/* default transfer setting; presumably async — TODO confirm */
			atp_dev->id[c][k].devtype = 0x7f;
			atp_dev->id[c][k].curr_req = NULL;
		}
		atp_dev->active_id[c] = 0;
		atp_dev->wide_id[c] = 0;
		atp_dev->host_id[c] = 0x07;
		atp_dev->quhd[c] = 0;
		atp_dev->quend[c] = 0;
		atp_dev->last_cmd[c] = 0xff;	/* 0xff == no deferred command pending */
		atp_dev->in_snd[c] = 0;
		atp_dev->in_int[c] = 0;
		for (k = 0; k < qcnt; k++) {
			atp_dev->quereq[c][k] = NULL;
		}
		for (k = 0; k < 16; k++) {
			atp_dev->id[c][k].curr_req = NULL;
			atp_dev->sp[c][k] = 0x04;
		}
	}
	return 0;
}

static void 
atp_set_host_id(struct atp_unit *atp, u8 c, u8 host_id) { atp_writeb_io(atp, c, 0, host_id | 0x08); atp_writeb_io(atp, c, 0x18, 0); while ((atp_readb_io(atp, c, 0x1f) & 0x80) == 0) mdelay(1); atp_readb_io(atp, c, 0x17); atp_writeb_io(atp, c, 1, 8); atp_writeb_io(atp, c, 2, 0x7f); atp_writeb_io(atp, c, 0x11, 0x20); } static void atp870_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, host_id; u8 scam_on; bool wide_chip = (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision == 4) || (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612UW) || (pdev->device == PCI_DEVICE_ID_ARTOP_AEC7612SUW); pci_read_config_byte(pdev, 0x49, &host_id); dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->ioport[0] = shpnt->io_port; atpdev->pciport[0] = shpnt->io_port + 0x20; host_id &= 0x07; atpdev->host_id[0] = host_id; scam_on = atp_readb_pci(atpdev, 0, 2); atpdev->global_map[0] = atp_readb_base(atpdev, 0x2d); atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x2e); if (atpdev->ultra_map[0] == 0) { scam_on = 0x00; atpdev->global_map[0] = 0x20; atpdev->ultra_map[0] = 0xffff; } if (pdev->revision > 0x07) /* check if atp876 chip */ atp_writeb_base(atpdev, 0x3e, 0x00); /* enable terminator */ k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; atp_writeb_base(atpdev, 0x3a, k); atp_writeb_base(atpdev, 0x3a, k & 0xdf); mdelay(32); atp_writeb_base(atpdev, 0x3a, k); mdelay(32); atp_set_host_id(atpdev, 0, host_id); tscam(shpnt, wide_chip, scam_on); atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) | 0x10); atp_is(atpdev, 0, wide_chip, 0); atp_writeb_base(atpdev, 0x3a, atp_readb_base(atpdev, 0x3a) & 0xef); atp_writeb_base(atpdev, 0x3b, atp_readb_base(atpdev, 0x3b) | 0x20); shpnt->max_id = wide_chip ? 
16 : 8; shpnt->this_id = host_id; } static void atp880_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, m, host_id; unsigned int n; pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); atpdev->ioport[0] = shpnt->io_port + 0x40; atpdev->pciport[0] = shpnt->io_port + 0x28; host_id = atp_readb_base(atpdev, 0x39) >> 4; dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->host_id[0] = host_id; atpdev->global_map[0] = atp_readb_base(atpdev, 0x35); atpdev->ultra_map[0] = atp_readw_base(atpdev, 0x3c); n = 0x3f09; while (n < 0x4000) { m = 0; atp_writew_base(atpdev, 0x34, n); n += 0x0002; if (atp_readb_base(atpdev, 0x30) == 0xff) break; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); atp_writew_base(atpdev, 0x34, n); n += 0x0002; atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x30); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x31); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x32); atpdev->sp[0][m++] = atp_readb_base(atpdev, 0x33); n += 0x0018; } atp_writew_base(atpdev, 0x34, 0); atpdev->ultra_map[0] = 0; atpdev->async[0] = 0; for (k = 0; k < 16; k++) { n = 1 << k; if (atpdev->sp[0][k] > 1) atpdev->ultra_map[0] |= n; else if (atpdev->sp[0][k] == 0) atpdev->async[0] |= n; } 
atpdev->async[0] = ~(atpdev->async[0]); atp_writeb_base(atpdev, 0x35, atpdev->global_map[0]); k = atp_readb_base(atpdev, 0x38) & 0x80; atp_writeb_base(atpdev, 0x38, k); atp_writeb_base(atpdev, 0x3b, 0x20); mdelay(32); atp_writeb_base(atpdev, 0x3b, 0); mdelay(32); atp_readb_io(atpdev, 0, 0x1b); atp_readb_io(atpdev, 0, 0x17); atp_set_host_id(atpdev, 0, host_id); tscam(shpnt, true, atp_readb_base(atpdev, 0x22)); atp_is(atpdev, 0, true, atp_readb_base(atpdev, 0x3f) & 0x40); atp_writeb_base(atpdev, 0x38, 0xb0); shpnt->max_id = 16; shpnt->this_id = host_id; } static void atp885_init(struct Scsi_Host *shpnt) { struct atp_unit *atpdev = shost_priv(shpnt); struct pci_dev *pdev = atpdev->pdev; unsigned char k, m, c; unsigned int n; unsigned char setupdata[2][16]; dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n", shpnt->io_port, shpnt->irq); atpdev->ioport[0] = shpnt->io_port + 0x80; atpdev->ioport[1] = shpnt->io_port + 0xc0; atpdev->pciport[0] = shpnt->io_port + 0x40; atpdev->pciport[1] = shpnt->io_port + 0x50; c = atp_readb_base(atpdev, 0x29); atp_writeb_base(atpdev, 0x29, c | 0x04); n = 0x1f80; while (n < 0x2000) { atp_writew_base(atpdev, 0x3c, n); if (atp_readl_base(atpdev, 0x38) == 0xffffffff) break; for (m = 0; m < 2; m++) { atpdev->global_map[m] = 0; for (k = 0; k < 4; k++) { atp_writew_base(atpdev, 0x3c, n++); ((unsigned long *)&setupdata[m][0])[k] = atp_readl_base(atpdev, 0x38); } for (k = 0; k < 4; k++) { atp_writew_base(atpdev, 0x3c, n++); ((unsigned long *)&atpdev->sp[m][0])[k] = atp_readl_base(atpdev, 0x38); } n += 8; } } c = atp_readb_base(atpdev, 0x29); atp_writeb_base(atpdev, 0x29, c & 0xfb); for (c = 0; c < 2; c++) { atpdev->ultra_map[c] = 0; atpdev->async[c] = 0; for (k = 0; k < 16; k++) { n = 1 << k; if (atpdev->sp[c][k] > 1) atpdev->ultra_map[c] |= n; else if (atpdev->sp[c][k] == 0) atpdev->async[c] |= n; } atpdev->async[c] = ~(atpdev->async[c]); if (atpdev->global_map[c] == 0) { k = setupdata[c][1]; if ((k & 0x40) != 
0) atpdev->global_map[c] |= 0x20; k &= 0x07; atpdev->global_map[c] |= k; if ((setupdata[c][2] & 0x04) != 0) atpdev->global_map[c] |= 0x08; atpdev->host_id[c] = setupdata[c][0] & 0x07; } } k = atp_readb_base(atpdev, 0x28) & 0x8f; k |= 0x10; atp_writeb_base(atpdev, 0x28, k); atp_writeb_pci(atpdev, 0, 1, 0x80); atp_writeb_pci(atpdev, 1, 1, 0x80); mdelay(100); atp_writeb_pci(atpdev, 0, 1, 0); atp_writeb_pci(atpdev, 1, 1, 0); mdelay(1000); atp_readb_io(atpdev, 0, 0x1b); atp_readb_io(atpdev, 0, 0x17); atp_readb_io(atpdev, 1, 0x1b); atp_readb_io(atpdev, 1, 0x17); k = atpdev->host_id[0]; if (k > 7) k = (k & 0x07) | 0x40; atp_set_host_id(atpdev, 0, k); k = atpdev->host_id[1]; if (k > 7) k = (k & 0x07) | 0x40; atp_set_host_id(atpdev, 1, k); mdelay(600); /* this delay used to be called tscam_885() */ dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); atp_writeb_io(atpdev, 0, 0x16, 0x80); dev_info(&pdev->dev, "Scanning Channel B SCSI Device ...\n"); atp_is(atpdev, 1, true, atp_readb_io(atpdev, 1, 0x1b) >> 7); atp_writeb_io(atpdev, 1, 0x16, 0x80); k = atp_readb_base(atpdev, 0x28) & 0xcf; k |= 0xc0; atp_writeb_base(atpdev, 0x28, k); k = atp_readb_base(atpdev, 0x1f) | 0x80; atp_writeb_base(atpdev, 0x1f, k); k = atp_readb_base(atpdev, 0x29) | 0x01; atp_writeb_base(atpdev, 0x29, k); shpnt->max_id = 16; shpnt->max_lun = (atpdev->global_map[0] & 0x07) + 1; shpnt->max_channel = 1; shpnt->this_id = atpdev->host_id[0]; } /* return non-zero on detection */ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *shpnt = NULL; struct atp_unit *atpdev; int err; if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision < 2) { dev_err(&pdev->dev, "ATP850S chips (AEC6710L/F cards) are not supported.\n"); return -ENODEV; } err = pci_enable_device(pdev); if (err) goto fail; if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "atp870u: DMA mask required but not 
available.\n"); err = -EIO; goto disable_device; } err = pci_request_regions(pdev, "atp870u"); if (err) goto disable_device; pci_set_master(pdev); err = -ENOMEM; shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit)); if (!shpnt) goto release_region; atpdev = shost_priv(shpnt); atpdev->host = shpnt; atpdev->pdev = pdev; pci_set_drvdata(pdev, atpdev); shpnt->io_port = pci_resource_start(pdev, 0); shpnt->io_port &= 0xfffffff8; shpnt->n_io_port = pci_resource_len(pdev, 0); atpdev->baseport = shpnt->io_port; shpnt->unique_id = shpnt->io_port; shpnt->irq = pdev->irq; err = atp870u_init_tables(shpnt); if (err) { dev_err(&pdev->dev, "Unable to allocate tables for Acard controller\n"); goto unregister; } if (is880(atpdev)) atp880_init(shpnt); else if (is885(atpdev)) atp885_init(shpnt); else atp870_init(shpnt); err = request_irq(shpnt->irq, atp870u_intr_handle, IRQF_SHARED, "atp870u", shpnt); if (err) { dev_err(&pdev->dev, "Unable to allocate IRQ %d.\n", shpnt->irq); goto free_tables; } err = scsi_add_host(shpnt, &pdev->dev); if (err) goto scsi_add_fail; scsi_scan_host(shpnt); return 0; scsi_add_fail: free_irq(shpnt->irq, shpnt); free_tables: atp870u_free_tables(shpnt); unregister: scsi_host_put(shpnt); release_region: pci_release_regions(pdev); disable_device: pci_disable_device(pdev); fail: return err; } /* The abort command does not leave the device in a clean state where it is available to be used again. Until this gets worked out, we will leave it commented out. 
*/
/*
 * Error-handler "abort" callback.  This only dumps controller, channel and
 * per-target state to the log and reports SUCCESS; it performs no actual
 * recovery (see the note above about the real abort being left out).
 */
static int atp870u_abort(struct scsi_cmnd * SCpnt)
{
	unsigned char j, k, c;
	struct scsi_cmnd *workrequ;
	struct atp_unit *dev;
	struct Scsi_Host *host;
	host = SCpnt->device->host;

	dev = (struct atp_unit *)&host->hostdata;
	c = scmd_channel(SCpnt);
	printk(" atp870u: abort Channel = %x \n", c);
	printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]);
	printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]);
	/* dump the channel's I/O registers 0x00..0x17, then a few extras */
	for (j = 0; j < 0x18; j++) {
		printk(" r%2x=%2x", j, atp_readb_io(dev, c, j));
	}
	printk(" r1c=%2x", atp_readb_io(dev, c, 0x1c));
	printk(" r1f=%2x in_snd=%2x ", atp_readb_io(dev, c, 0x1f), dev->in_snd[c]);
	printk(" d00=%2x", atp_readb_pci(dev, c, 0x00));
	printk(" d02=%2x", atp_readb_pci(dev, c, 0x02));
	/* dump the CDB of every target that still has a request in flight */
	for (j = 0; j < 16; j++) {
		if (dev->id[c][j].curr_req != NULL) {
			workrequ = dev->id[c][j].curr_req;
			printk("\n que cdb= ");
			for (k = 0; k < workrequ->cmd_len; k++) {
				printk(" %2x ", workrequ->cmnd[k]);
			}
			printk(" last_lenu= %x ", (unsigned int)dev->id[c][j].last_len);
		}
	}
	return SUCCESS;
}

/* Return a static one-line description of the adapter/driver. */
static const char *atp870u_info(struct Scsi_Host *notused)
{
	static char buffer[128];

	strcpy(buffer, "ACARD AEC-6710/6712/67160 PCI Ultra/W/LVD SCSI-3 Adapter Driver V2.6+ac ");
	return buffer;
}

/* /proc/scsi "show" hook: print base I/O port and IRQ of this host. */
static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
{
	seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n"
		"Adapter Configuration:\n");
	seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port);
	seq_printf(m, " IRQ: %d\n", HBAptr->irq);
	return 0;
}

/*
 * BIOS geometry: 64 heads / 32 sectors, switching to 255/63 when the
 * resulting cylinder count would exceed 1024.
 */
static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
			sector_t capacity, int *ip)
{
	int heads, sectors, cylinders;

	heads = 64;
	sectors = 32;
	cylinders = (unsigned long)capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (unsigned long)capacity / (heads * sectors);
	}
	ip[0] = heads;
	ip[1] = sectors;
	ip[2] = cylinders;
	return 0;
}

/* PCI remove: tear down in reverse order of probe. */
static void atp870u_remove (struct pci_dev *pdev)
{
	struct atp_unit *devext = pci_get_drvdata(pdev);
	struct Scsi_Host *pshost = devext->host;

	scsi_remove_host(pshost);
	free_irq(pshost->irq, pshost);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	atp870u_free_tables(pshost);
	scsi_host_put(pshost);
}

MODULE_LICENSE("GPL");

static struct scsi_host_template atp870u_template = {
	.module			= THIS_MODULE,
	.name			= "atp870u"		/* name */,
	.proc_name		= "atp870u",
	.show_info		= atp870u_show_info,
	.info			= atp870u_info		/* info */,
	.queuecommand		= atp870u_queuecommand	/* queuecommand */,
	.eh_abort_handler	= atp870u_abort		/* abort */,
	.bios_param		= atp870u_biosparam	/* biosparm */,
	.can_queue		= qcnt			/* can_queue */,
	.this_id		= 7			/* SCSI ID */,
	.sg_tablesize		= ATP870U_SCATTER	/*SG_ALL*/ /*SG_NONE*/,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= ATP870U_MAX_SECTORS,
};

static struct pci_device_id atp870u_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612S) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612D) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612SUW) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_8060) },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, atp870u_id_table);

static struct pci_driver atp870u_driver = {
	.id_table	= atp870u_id_table,
	.name		= "atp870u",
	.probe		= atp870u_probe,
	.remove		= atp870u_remove,
};

module_pci_driver(atp870u_driver);

/*
 * Bus scan / transfer negotiation for one channel.  For each possible SCSI
 * ID (0..7, or 0..15 on wide chips) that is not already active and is not
 * the host adapter itself, this selects the target, issues an INQUIRY, and
 * then walks a message exchange to negotiate wide and synchronous transfer
 * parameters, recording the result in dev->id[c][i].devsp and the
 * wide_id/active_id bitmaps.
 *
 * NOTE(review): the message byte tables below (synn/synu/synw/wide/u3) are
 * presumably SDTR/WDTR/PPR extended messages — confirm against the SCSI
 * message spec.  The busy-wait loops poll bit 7 of I/O register 0x1f (and
 * 0x17) for completion, exactly as the hardware flow requires; do not
 * reorder any register access in here.
 */
static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
		   unsigned char lvdmode)
{
	unsigned char i, j, k, rmb, n;
	unsigned short int m;
	static unsigned char mbuf[512];
	static unsigned char satn[9] = { 0, 0, 0, 0, 0, 0, 0, 6, 6 };
	static unsigned char inqd[9] = { 0x12, 0, 0, 0, 0x24, 0, 0, 0x24, 6 };
	static unsigned char synn[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
	unsigned char synu[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
	static unsigned char synw[6] = { 0x80, 1, 3, 1, 0x19, 0x0e };
	static unsigned char synw_870[6] = { 0x80, 1, 3, 1, 0x0c, 0x07 };
	unsigned char synuw[6] = { 0x80, 1, 3, 1, 0x0a, 0x0e };
	static unsigned char wide[6] = { 0x80, 1, 2, 3, 1, 0 };
	static unsigned char u3[9] = { 0x80, 1, 6, 4, 0x09, 00, 0x0e, 0x01, 0x02 };

	for (i = 0; i < 16; i++) {
		/* narrow chips only have IDs 0..7 */
		if (!wide_chip && (i > 7))
			break;
		m = 1;
		m = m << i;
		if ((m & dev->active_id[c]) != 0) {
			continue;
		}
		if (i == dev->host_id[c]) {
			printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]);
			continue;
		}
		/* select target i (IDs >= 8 are encoded with bit 0x40) */
		atp_writeb_io(dev, c, 0x1b, wide_chip ? 0x01 : 0x00);
		atp_writeb_io(dev, c, 1, 0x08);
		atp_writeb_io(dev, c, 2, 0x7f);
		atp_writeb_io(dev, c, 3, satn[0]);
		atp_writeb_io(dev, c, 4, satn[1]);
		atp_writeb_io(dev, c, 5, satn[2]);
		atp_writeb_io(dev, c, 6, satn[3]);
		atp_writeb_io(dev, c, 7, satn[4]);
		atp_writeb_io(dev, c, 8, satn[5]);
		atp_writeb_io(dev, c, 0x0f, 0);
		atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, satn[6]);
		atp_writeb_io(dev, c, 0x14, satn[7]);
		j = i;
		if ((j & 0x08) != 0) {
			j = (j & 0x07) | 0x40;
		}
		atp_writeb_io(dev, c, 0x15, j);
		atp_writeb_io(dev, c, 0x18, satn[8]);

		/* wait for completion (bit 7 of reg 0x1f) */
		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
			continue;	/* selection failed, no device at this ID */

		while (atp_readb_io(dev, c, 0x17) != 0x8e)
			cpu_relax();

		dev->active_id[c] |= m;

		atp_writeb_io(dev, c, 0x10, 0x30);
		if (is885(dev) || is880(dev))
			atp_writeb_io(dev, c, 0x14, 0x00);
		else /* result of is870() merge - is this a bug? */
			atp_writeb_io(dev, c, 0x04, 0x00);

phase_cmd:
		atp_writeb_io(dev, c, 0x18, 0x08);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		j = atp_readb_io(dev, c, 0x17);
		if (j != 0x16) {
			atp_writeb_io(dev, c, 0x10, 0x41);
			goto phase_cmd;
		}
sel_ok:
		/* issue the INQUIRY command */
		atp_writeb_io(dev, c, 3, inqd[0]);
		atp_writeb_io(dev, c, 4, inqd[1]);
		atp_writeb_io(dev, c, 5, inqd[2]);
		atp_writeb_io(dev, c, 6, inqd[3]);
		atp_writeb_io(dev, c, 7, inqd[4]);
		atp_writeb_io(dev, c, 8, inqd[5]);
		atp_writeb_io(dev, c, 0x0f, 0);
		atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, inqd[6]);
		atp_writeb_io(dev, c, 0x14, inqd[7]);
		atp_writeb_io(dev, c, 0x18, inqd[8]);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
			continue;

		while (atp_readb_io(dev, c, 0x17) != 0x8e)
			cpu_relax();

		if (wide_chip)
			atp_writeb_io(dev, c, 0x1b, 0x00);

		atp_writeb_io(dev, c, 0x18, 0x08);
		j = 0;
rd_inq_data:
		/* drain INQUIRY data bytes into mbuf until done (bit 7) */
		k = atp_readb_io(dev, c, 0x1f);
		if ((k & 0x01) != 0) {
			mbuf[j++] = atp_readb_io(dev, c, 0x19);
			goto rd_inq_data;
		}
		if ((k & 0x80) == 0) {
			goto rd_inq_data;
		}
		j = atp_readb_io(dev, c, 0x17);
		if (j == 0x16) {
			goto inq_ok;
		}
		atp_writeb_io(dev, c, 0x10, 0x46);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, 0);
		atp_writeb_io(dev, c, 0x14, 0);
		atp_writeb_io(dev, c, 0x18, 0x08);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x16)
			goto sel_ok;

inq_ok:
		/* mbuf now holds standard INQUIRY data; byte 8+ is vendor id */
		mbuf[36] = 0;
		printk(KERN_INFO " ID: %2d %s\n", i, &mbuf[8]);
		dev->id[c][i].devtype = mbuf[0];
		rmb = mbuf[1];
		n = mbuf[7];
		if (!wide_chip)
			goto not_wide;
		if ((mbuf[7] & 0x60) == 0) {
			goto not_wide;	/* target advertises no wide support */
		}
		if (is885(dev) || is880(dev)) {
			if ((i < 8) && ((dev->global_map[c] & 0x20) == 0))
				goto not_wide;
		} else { /* result of is870() merge - is this a bug? */
			if ((dev->global_map[c] & 0x20) == 0)
				goto not_wide;
		}
		if (lvdmode == 0) {
			goto chg_wide;
		}
		if (dev->sp[c][i] != 0x04)	// force u2
		{
			goto chg_wide;
		}

		/* LVD/U3 negotiation: resend SATN then exchange u3[] message */
		atp_writeb_io(dev, c, 0x1b, 0x01);
		atp_writeb_io(dev, c, 3, satn[0]);
		atp_writeb_io(dev, c, 4, satn[1]);
		atp_writeb_io(dev, c, 5, satn[2]);
		atp_writeb_io(dev, c, 6, satn[3]);
		atp_writeb_io(dev, c, 7, satn[4]);
		atp_writeb_io(dev, c, 8, satn[5]);
		atp_writeb_io(dev, c, 0x0f, 0);
		atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, satn[6]);
		atp_writeb_io(dev, c, 0x14, satn[7]);
		atp_writeb_io(dev, c, 0x18, satn[8]);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
			continue;

		while (atp_readb_io(dev, c, 0x17) != 0x8e)
			cpu_relax();

try_u3:
		j = 0;
		atp_writeb_io(dev, c, 0x14, 0x09);
		atp_writeb_io(dev, c, 0x18, 0x20);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0)
				atp_writeb_io(dev, c, 0x19, u3[j++]);
			cpu_relax();
		}

		while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00)
			cpu_relax();

		/* low nibble of reg 0x17 selects the next bus phase */
		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto u3p_in;
		}
		if (j == 0x0a) {
			goto u3p_cmd;
		}
		if (j == 0x0e) {
			goto try_u3;
		}
		continue;
u3p_out:
		atp_writeb_io(dev, c, 0x18, 0x20);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0)
				atp_writeb_io(dev, c, 0x19, 0);
			cpu_relax();
		}

		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto u3p_in;
		}
		if (j == 0x0a) {
			goto u3p_cmd;
		}
		if (j == 0x0e) {
			goto u3p_out;
		}
		continue;
u3p_in:
		atp_writeb_io(dev, c, 0x14, 0x09);
		atp_writeb_io(dev, c, 0x18, 0x20);
		k = 0;
u3p_in1:
		/* collect the target's reply message into mbuf */
		j = atp_readb_io(dev, c, 0x1f);
		if ((j & 0x01) != 0) {
			mbuf[k++] = atp_readb_io(dev, c, 0x19);
			goto u3p_in1;
		}
		if ((j & 0x80) == 0x00) {
			goto u3p_in1;
		}
		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto u3p_in;
		}
		if (j == 0x0a) {
			goto u3p_cmd;
		}
		if (j == 0x0e) {
			goto u3p_out;
		}
		continue;
u3p_cmd:
		atp_writeb_io(dev, c, 0x10, 0x30);
		atp_writeb_io(dev, c, 0x14, 0x00);
		atp_writeb_io(dev, c, 0x18, 0x08);

		/* NOTE(review): busy-wait without cpu_relax() — as in original */
		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00);

		j = atp_readb_io(dev, c, 0x17);
		if (j != 0x16) {
			if (j == 0x4e) {
				goto u3p_out;
			}
			continue;
		}
		/* validate the U3 reply; on mismatch fall back to wide nego */
		if (mbuf[0] != 0x01) {
			goto chg_wide;
		}
		if (mbuf[1] != 0x06) {
			goto chg_wide;
		}
		if (mbuf[2] != 0x04) {
			goto chg_wide;
		}
		if (mbuf[3] == 0x09) {
			m = 1;
			m = m << i;
			dev->wide_id[c] |= m;
			dev->id[c][i].devsp = 0xce;
#ifdef ED_DBGP
			printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
#endif
			continue;
		}
chg_wide:
		/* wide negotiation: resend SATN then exchange wide[] message */
		atp_writeb_io(dev, c, 0x1b, 0x01);
		atp_writeb_io(dev, c, 3, satn[0]);
		atp_writeb_io(dev, c, 4, satn[1]);
		atp_writeb_io(dev, c, 5, satn[2]);
		atp_writeb_io(dev, c, 6, satn[3]);
		atp_writeb_io(dev, c, 7, satn[4]);
		atp_writeb_io(dev, c, 8, satn[5]);
		atp_writeb_io(dev, c, 0x0f, 0);
		atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, satn[6]);
		atp_writeb_io(dev, c, 0x14, satn[7]);
		atp_writeb_io(dev, c, 0x18, satn[8]);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
			continue;

		while (atp_readb_io(dev, c, 0x17) != 0x8e)
			cpu_relax();

try_wide:
		j = 0;
		atp_writeb_io(dev, c, 0x14, 0x05);
		atp_writeb_io(dev, c, 0x18, 0x20);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0)
				atp_writeb_io(dev, c, 0x19, wide[j++]);
			cpu_relax();
		}

		while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00)
			cpu_relax();

		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto widep_in;
		}
		if (j == 0x0a) {
			goto widep_cmd;
		}
		if (j == 0x0e) {
			goto try_wide;
		}
		continue;
widep_out:
		atp_writeb_io(dev, c, 0x18, 0x20);
		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0)
				atp_writeb_io(dev, c, 0x19, 0);
			cpu_relax();
		}
		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto widep_in;
		}
		if (j == 0x0a) {
			goto widep_cmd;
		}
		if (j == 0x0e) {
			goto widep_out;
		}
		continue;
widep_in:
		atp_writeb_io(dev, c, 0x14, 0xff);
		atp_writeb_io(dev, c, 0x18, 0x20);
		k = 0;
widep_in1:
		j = atp_readb_io(dev, c, 0x1f);
		if ((j & 0x01) != 0) {
			mbuf[k++] = atp_readb_io(dev, c, 0x19);
			goto widep_in1;
		}
		if ((j & 0x80) == 0x00) {
			goto widep_in1;
		}
		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto widep_in;
		}
		if (j == 0x0a) {
			goto widep_cmd;
		}
		if (j == 0x0e) {
			goto widep_out;
		}
		continue;
widep_cmd:
		atp_writeb_io(dev, c, 0x10, 0x30);
		atp_writeb_io(dev, c, 0x14, 0x00);
		atp_writeb_io(dev, c, 0x18, 0x08);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		j = atp_readb_io(dev, c, 0x17);
		if (j != 0x16) {
			if (j == 0x4e) {
				goto widep_out;
			}
			continue;
		}
		/* validate the wide reply before setting the wide_id bit */
		if (mbuf[0] != 0x01) {
			goto not_wide;
		}
		if (mbuf[1] != 0x02) {
			goto not_wide;
		}
		if (mbuf[2] != 0x03) {
			goto not_wide;
		}
		if (mbuf[3] != 0x01) {
			goto not_wide;
		}
		m = 1;
		m = m << i;
		dev->wide_id[c] |= m;
not_wide:
		/* only some device types (per INQUIRY byte 0) get sync nego */
		if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) ||
		    ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
			m = 1;
			m = m << i;
			if ((dev->async[c] & m) != 0) {
				goto set_sync;
			}
		}
		continue;
set_sync:
		/* pick the sync period byte for the message tables */
		if ((!is885(dev) && !is880(dev)) || (dev->sp[c][i] == 0x02)) {
			synu[4] = 0x0c;
			synuw[4] = 0x0c;
		} else {
			if (dev->sp[c][i] >= 0x03) {
				synu[4] = 0x0a;
				synuw[4] = 0x0a;
			}
		}
		j = 0;
		if ((m & dev->wide_id[c]) != 0) {
			j |= 0x01;
		}
		atp_writeb_io(dev, c, 0x1b, j);
		atp_writeb_io(dev, c, 3, satn[0]);
		atp_writeb_io(dev, c, 4, satn[1]);
		atp_writeb_io(dev, c, 5, satn[2]);
		atp_writeb_io(dev, c, 6, satn[3]);
		atp_writeb_io(dev, c, 7, satn[4]);
		atp_writeb_io(dev, c, 8, satn[5]);
		atp_writeb_io(dev, c, 0x0f, 0);
		atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp);
		atp_writeb_io(dev, c, 0x12, 0);
		atp_writeb_io(dev, c, 0x13, satn[6]);
		atp_writeb_io(dev, c, 0x14, satn[7]);
		atp_writeb_io(dev, c, 0x18, satn[8]);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
			continue;

		while (atp_readb_io(dev, c, 0x17) != 0x8e)
			cpu_relax();

try_sync:
		j = 0;
		atp_writeb_io(dev, c, 0x14, 0x06);
		atp_writeb_io(dev, c, 0x18, 0x20);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) {
				/* pick the message table by wide/ultra status */
				if ((m & dev->wide_id[c]) != 0) {
					if (is885(dev) || is880(dev)) {
						if ((m & dev->ultra_map[c]) != 0) {
							atp_writeb_io(dev, c, 0x19, synuw[j++]);
						} else {
							atp_writeb_io(dev, c, 0x19, synw[j++]);
						}
					} else
						atp_writeb_io(dev, c, 0x19, synw_870[j++]);
				} else {
					if ((m & dev->ultra_map[c]) != 0) {
						atp_writeb_io(dev, c, 0x19, synu[j++]);
					} else {
						atp_writeb_io(dev, c, 0x19, synn[j++]);
					}
				}
			}
		}

		while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00)
			cpu_relax();

		j = atp_readb_io(dev, c, 0x17) & 0x0f;
		if (j == 0x0f) {
			goto phase_ins;
		}
		if (j == 0x0a) {
			goto phase_cmds;
		}
		if (j == 0x0e) {
			goto try_sync;
		}
		continue;
phase_outs:
		atp_writeb_io(dev, c, 0x18, 0x20);
		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) {
			if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0x00)
				atp_writeb_io(dev, c, 0x19, 0x00);
			cpu_relax();
		}
		j = atp_readb_io(dev, c, 0x17);
		if (j == 0x85) {
			goto tar_dcons;
		}
		j &= 0x0f;
		if (j == 0x0f) {
			goto phase_ins;
		}
		if (j == 0x0a) {
			goto phase_cmds;
		}
		if (j == 0x0e) {
			goto phase_outs;
		}
		continue;
phase_ins:
		if (is885(dev) || is880(dev))
			atp_writeb_io(dev, c, 0x14, 0x06);
		else
			atp_writeb_io(dev, c, 0x14, 0xff);
		atp_writeb_io(dev, c, 0x18, 0x20);
		k = 0;
phase_ins1:
		j = atp_readb_io(dev, c, 0x1f);
		if ((j & 0x01) != 0x00) {
			mbuf[k++] = atp_readb_io(dev, c, 0x19);
			goto phase_ins1;
		}
		if ((j & 0x80) == 0x00) {
			goto phase_ins1;
		}

		/* NOTE(review): busy-wait without cpu_relax() — as in original */
		while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00);

		j = atp_readb_io(dev, c, 0x17);
		if (j == 0x85) {
			goto tar_dcons;
		}
		j &= 0x0f;
		if (j == 0x0f) {
			goto phase_ins;
		}
		if (j == 0x0a) {
			goto phase_cmds;
		}
		if (j == 0x0e) {
			goto phase_outs;
		}
		continue;
phase_cmds:
		atp_writeb_io(dev, c, 0x10, 0x30);
tar_dcons:
		atp_writeb_io(dev, c, 0x14, 0x00);
		atp_writeb_io(dev, c, 0x18, 0x08);

		while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
			cpu_relax();

		j = atp_readb_io(dev, c, 0x17);
		if (j != 0x16) {
			continue;
		}
		/* validate the sync reply (mbuf[3]=period, mbuf[4]=offset) */
		if (mbuf[0] != 0x01) {
			continue;
		}
		if (mbuf[1] != 0x03) {
			continue;
		}
		if (mbuf[4] == 0x00) {
			continue;
		}
		if (mbuf[3] > 0x64) {
			continue;
		}
		/* clamp the offset to the chip's maximum */
		if (is885(dev) || is880(dev)) {
			if (mbuf[4] > 0x0e) {
				mbuf[4] = 0x0e;
			}
		} else {
			if (mbuf[4] > 0x0c) {
				mbuf[4] = 0x0c;
			}
		}
		dev->id[c][i].devsp = mbuf[4];
		/* map the agreed period onto the devsp speed nibble */
		if (is885(dev) || is880(dev))
			if (mbuf[3] < 0x0c) {
				j = 0xb0;
				goto set_syn_ok;
			}
		if ((mbuf[3] < 0x0d) && (rmb == 0)) {
			j = 0xa0;
			goto set_syn_ok;
		}
		if (mbuf[3] < 0x1a) {
			j = 0x20;
			goto set_syn_ok;
		}
		if (mbuf[3] < 0x33) {
			j = 0x40;
			goto set_syn_ok;
		}
		if (mbuf[3] < 0x4c) {
			j = 0x50;
			goto set_syn_ok;
		}
		j = 0x60;
set_syn_ok:
		dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j;
#ifdef ED_DBGP
		printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
#endif
	}
}
gpl-2.0
taozhijiang/linux
arch/arm/kernel/kgdb.c
451
6929
/*
 * arch/arm/kernel/kgdb.c
 *
 * ARM KGDB support
 *
 * Copyright (c) 2002-2004 MontaVista Software, Inc
 * Copyright (c) 2008 Wind River Systems, Inc.
 *
 * Authors:  George Davis <davis_g@mvista.com>
 *           Deepak Saxena <dsaxena@plexity.net>
 */
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/uaccess.h>

#include <asm/patch.h>
#include <asm/traps.h>

/*
 * GDB register layout for ARM: r0-r10, fp, ip, sp, lr, pc mapped onto
 * pt_regs; the legacy FPA registers f0-f7/fps have no pt_regs slot
 * (offset -1) and read back as zero; cpsr last.
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
	{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
	{ "r1", 4, offsetof(struct pt_regs, ARM_r1)},
	{ "r2", 4, offsetof(struct pt_regs, ARM_r2)},
	{ "r3", 4, offsetof(struct pt_regs, ARM_r3)},
	{ "r4", 4, offsetof(struct pt_regs, ARM_r4)},
	{ "r5", 4, offsetof(struct pt_regs, ARM_r5)},
	{ "r6", 4, offsetof(struct pt_regs, ARM_r6)},
	{ "r7", 4, offsetof(struct pt_regs, ARM_r7)},
	{ "r8", 4, offsetof(struct pt_regs, ARM_r8)},
	{ "r9", 4, offsetof(struct pt_regs, ARM_r9)},
	{ "r10", 4, offsetof(struct pt_regs, ARM_r10)},
	{ "fp", 4, offsetof(struct pt_regs, ARM_fp)},
	{ "ip", 4, offsetof(struct pt_regs, ARM_ip)},
	{ "sp", 4, offsetof(struct pt_regs, ARM_sp)},
	{ "lr", 4, offsetof(struct pt_regs, ARM_lr)},
	{ "pc", 4, offsetof(struct pt_regs, ARM_pc)},
	{ "f0", 12, -1 },
	{ "f1", 12, -1 },
	{ "f2", 12, -1 },
	{ "f3", 12, -1 },
	{ "f4", 12, -1 },
	{ "f5", 12, -1 },
	{ "f6", 12, -1 },
	{ "f7", 12, -1 },
	{ "fps", 4, -1 },
	{ "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)},
};

/*
 * Copy register @regno out of @regs into @mem for GDB; registers with
 * no pt_regs backing are zero-filled.  Returns the register name, or
 * NULL for an out-of-range number.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

/*
 * Write register @regno from @mem into @regs; writes to registers with
 * no pt_regs backing are silently discarded.
 */
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

/*
 * Build a GDB register set for a task that is not running: only the
 * callee-saved registers preserved by switch_to() are available from
 * the thread's cpu_context; everything else reads as zero.
 */
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	struct thread_info *ti;
	int regno;

	/* Just making sure... */
	if (task == NULL)
		return;

	/* Initialize to zero */
	for (regno = 0; regno < GDB_MAX_REGS; regno++)
		gdb_regs[regno] = 0;

	/* Otherwise, we have only some registers from switch_to() */
	ti = task_thread_info(task);
	gdb_regs[_R4] = ti->cpu_context.r4;
	gdb_regs[_R5] = ti->cpu_context.r5;
	gdb_regs[_R6] = ti->cpu_context.r6;
	gdb_regs[_R7] = ti->cpu_context.r7;
	gdb_regs[_R8] = ti->cpu_context.r8;
	gdb_regs[_R9] = ti->cpu_context.r9;
	gdb_regs[_R10] = ti->cpu_context.sl;
	gdb_regs[_FP] = ti->cpu_context.fp;
	gdb_regs[_SPT] = ti->cpu_context.sp;
	gdb_regs[_PC] = ti->cpu_context.pc;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->ARM_pc = pc;
}

/* Set when the trap came from a KGDB_COMPILED_BREAK instruction. */
static int compiled_break;

/*
 * Handle the GDB remote-protocol packets that need architecture help:
 * detach ('D'), kill ('k') and continue ('c').  Returns 0 when handled,
 * -1 to let generic code deal with the packet.
 */
int kgdb_arch_handle_exception(int exception_vector, int signo,
			       int err_code, char *remcom_in_buffer,
			       char *remcom_out_buffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcom_in_buffer[0]) {
	case 'D':
	case 'k':
	case 'c':
		/*
		 * Try to read optional parameter, pc unchanged if no parm.
		 * If this was a compiled breakpoint, we need to move
		 * to the next instruction or we will just breakpoint
		 * over and over again.
		 */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ARM_pc = addr;
		else if (compiled_break == 1)
			linux_regs->ARM_pc += 4;

		compiled_break = 0;

		return 0;
	}

	return -1;
}

/* Undef-instruction hook body for dynamically planted breakpoints. */
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
{
	kgdb_handle_exception(1, SIGTRAP, 0, regs);

	return 0;
}

/* Undef-instruction hook body for compiled-in kgdb_breakpoint()s. */
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
{
	compiled_break = 1;
	kgdb_handle_exception(1, SIGTRAP, 0, regs);

	return 0;
}

static struct undef_hook kgdb_brkpt_hook = {
	.instr_mask		= 0xffffffff,
	.instr_val		= KGDB_BREAKINST,
	.cpsr_mask		= MODE_MASK,
	.cpsr_val		= SVC_MODE,
	.fn			= kgdb_brk_fn
};

static struct undef_hook kgdb_compiled_brkpt_hook = {
	.instr_mask		= 0xffffffff,
	.instr_val		= KGDB_COMPILED_BREAK,
	.cpsr_mask		= MODE_MASK,
	.cpsr_val		= SVC_MODE,
	.fn			= kgdb_compiled_brk_fn
};

static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}

/*
 * Pull the other CPUs into the debugger via an SMP cross-call.
 * Interrupts are briefly enabled so the cross-call can be delivered.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	if (kgdb_handle_exception(1, args->signr, cmd, regs))
		return NOTIFY_DONE;
	return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
	.priority	= -INT_MAX,	/* run after all other die notifiers */
};

/**
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
	int ret = register_die_notifier(&kgdb_notifier);

	if (ret != 0)
		return ret;

	register_undef_hook(&kgdb_brkpt_hook);
	register_undef_hook(&kgdb_compiled_brkpt_hook);

	return 0;
}

/**
 * kgdb_arch_exit - Perform any architecture specific uninitalization.
 *
 * This function will handle the uninitalization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_undef_hook(&kgdb_brkpt_hook);
	unregister_undef_hook(&kgdb_compiled_brkpt_hook);
	unregister_die_notifier(&kgdb_notifier);
}

/*
 * Save the original instruction and patch in the breakpoint opcode.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	/* patch_text() only supports int-sized breakpoints */
	BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	/* Machine is already stopped, so we can use __patch_text() directly */
	__patch_text((void *)bpt->bpt_addr,
		     *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);

	return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	/* Machine is already stopped, so we can use __patch_text() directly */
	__patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);

	return 0;
}

/*
 * Register our undef instruction hooks with ARM undef core.
 * We register a hook specifically looking for the KGDB break inst
 * and we handle the normal undef case within the do_undefinstr
 * handler.
 */
struct kgdb_arch arch_kgdb_ops = {
#ifndef __ARMEB__
	.gdb_bpt_instr		= {0xfe, 0xde, 0xff, 0xe7}
#else /* ! __ARMEB__ */
	.gdb_bpt_instr		= {0xe7, 0xff, 0xde, 0xfe}
#endif
};
gpl-2.0
supercurio/linux_shw-m110s
arch/arm/mach-omap2/board-4430sdp.c
451
2330
/* * Board support file for OMAP4430 SDP. * * Copyright (C) 2009 Texas Instruments * * Author: Santosh Shilimkar <santosh.shilimkar@ti.com> * * Based on mach-omap2/board-3430sdp.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/board.h> #include <mach/common.h> #include <mach/control.h> #include <mach/timer-gp.h> #include <asm/hardware/gic.h> static struct platform_device sdp4430_lcd_device = { .name = "sdp4430_lcd", .id = -1, }; static struct platform_device *sdp4430_devices[] __initdata = { &sdp4430_lcd_device, }; static struct omap_uart_config sdp4430_uart_config __initdata = { .enabled_uarts = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3), }; static struct omap_lcd_config sdp4430_lcd_config __initdata = { .ctrl_name = "internal", }; static struct omap_board_config_kernel sdp4430_config[] __initdata = { { OMAP_TAG_LCD, &sdp4430_lcd_config }, }; static void __init gic_init_irq(void) { gic_dist_init(0, OMAP2_IO_ADDRESS(OMAP44XX_GIC_DIST_BASE), 29); gic_cpu_init(0, OMAP2_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE)); } static void __init omap_4430sdp_init_irq(void) { omap_board_config = sdp4430_config; omap_board_config_size = ARRAY_SIZE(sdp4430_config); omap2_init_common_hw(NULL, NULL); #ifdef CONFIG_OMAP_32K_TIMER omap2_gp_clockevent_set_gptimer(1); #endif gic_init_irq(); omap_gpio_init(); } static void __init omap_4430sdp_init(void) { platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices)); omap_serial_init(); } static void __init omap_4430sdp_map_io(void) { omap2_set_globals_443x(); omap2_map_common_io(); } MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board") /* Maintainer: 
Santosh Shilimkar - Texas Instruments Inc */ .phys_io = 0x48000000, .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc, .boot_params = 0x80000100, .map_io = omap_4430sdp_map_io, .init_irq = omap_4430sdp_init_irq, .init_machine = omap_4430sdp_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
gtvhacker/Sony-x86-kexec
net/mac80211/wme.c
963
3796
/* * Copyright 2004, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/types.h> #include <net/ip.h> #include <net/pkt_sched.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "wme.h" /* Default mapping in classifier to work with default * queue setup. */ const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; static int wme_downgrade_ac(struct sk_buff *skb) { switch (skb->priority) { case 6: case 7: skb->priority = 5; /* VO -> VI */ return 0; case 4: case 5: skb->priority = 3; /* VI -> BE */ return 0; case 0: case 3: skb->priority = 2; /* BE -> BK */ return 0; default: return -1; } } /* Indicate which queue to use. */ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; u32 sta_flags = 0; const u8 *ra = NULL; bool qos = false; if (local->hw.queues < 4 || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ return min_t(u16, local->hw.queues - 1, ieee802_1d_to_ac[skb->priority]); } rcu_read_lock(); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: rcu_read_lock(); sta = rcu_dereference(sdata->u.vlan.sta); if (sta) sta_flags = get_sta_flags(sta); rcu_read_unlock(); if (sta) break; case NL80211_IFTYPE_AP: ra = skb->data; break; case NL80211_IFTYPE_WDS: ra = sdata->u.wds.remote_addr; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: /* * XXX: This is clearly broken ... but already was before, * because ieee80211_fill_mesh_addresses() would clear A1 * except for multicast addresses. 
*/ break; #endif case NL80211_IFTYPE_STATION: ra = sdata->u.mgd.bssid; break; case NL80211_IFTYPE_ADHOC: ra = skb->data; break; default: break; } if (!sta && ra && !is_multicast_ether_addr(ra)) { sta = sta_info_get(sdata, ra); if (sta) sta_flags = get_sta_flags(sta); } if (sta_flags & WLAN_STA_WME) qos = true; rcu_read_unlock(); if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ return ieee802_1d_to_ac[skb->priority]; } /* use the data classifier to determine what 802.1d tag the * data frame has */ skb->priority = cfg80211_classify8021d(skb); return ieee80211_downgrade_queue(local, skb); } u16 ieee80211_downgrade_queue(struct ieee80211_local *local, struct sk_buff *skb) { /* in case we are a client verify acm is not set for this ac */ while (unlikely(local->wmm_acm & BIT(skb->priority))) { if (wme_downgrade_ac(skb)) { /* * This should not really happen. The AP has marked all * lower ACs to require admission control which is not * a reasonable configuration. Allow the frame to be * transmitted using AC_BK as a workaround. */ break; } } /* look up which queue to use for frames with this 1d tag */ return ieee802_1d_to_ac[skb->priority]; } void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; /* Fill in the QoS header if there is one. */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); u8 ack_policy = 0, tid; tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; if (unlikely(local->wifi_wme_noack_test)) ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << QOS_CONTROL_ACK_POLICY_SHIFT; /* qos header is 2 bytes, second reserved */ *p++ = ack_policy | tid; *p = 0; } }
gpl-2.0
electrikjesus/android_kernel_samsung_smdk4412
drivers/md/dm-mpath.c
1475
41067
/* * Copyright (C) 2003 Sistina Software Limited. * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include <linux/device-mapper.h> #include "dm-path-selector.h" #include "dm-uevent.h" #include <linux/ctype.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/workqueue.h> #include <scsi/scsi_dh.h> #include <asm/atomic.h> #define DM_MSG_PREFIX "multipath" #define MESG_STR(x) x, sizeof(x) #define DM_PG_INIT_DELAY_MSECS 2000 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) /* Path properties */ struct pgpath { struct list_head list; struct priority_group *pg; /* Owning PG */ unsigned is_active; /* Path status */ unsigned fail_count; /* Cumulative failure count */ struct dm_path path; struct delayed_work activate_path; }; #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) /* * Paths are grouped into Priority Groups and numbered from 1 upwards. * Each has a path selector which controls which path gets used. */ struct priority_group { struct list_head list; struct multipath *m; /* Owning multipath instance */ struct path_selector ps; unsigned pg_num; /* Reference number */ unsigned bypassed; /* Temporarily bypass this PG? */ unsigned nr_pgpaths; /* Number of paths in PG */ struct list_head pgpaths; }; /* Multipath context */ struct multipath { struct list_head list; struct dm_target *ti; spinlock_t lock; const char *hw_handler_name; char *hw_handler_params; unsigned nr_priority_groups; struct list_head priority_groups; wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ unsigned pg_init_required; /* pg_init needs calling? */ unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ unsigned pg_init_delay_retry; /* Delay pg_init retry? 
*/ unsigned nr_valid_paths; /* Total number of usable paths */ struct pgpath *current_pgpath; struct priority_group *current_pg; struct priority_group *next_pg; /* Switch to this PG if set */ unsigned repeat_count; /* I/Os left before calling PS again */ unsigned queue_io; /* Must we queue all I/O? */ unsigned queue_if_no_path; /* Queue I/O if last path fails? */ unsigned saved_queue_if_no_path;/* Saved state during suspension */ unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ struct work_struct process_queued_ios; struct list_head queued_ios; unsigned queue_size; struct work_struct trigger_event; /* * We must use a mempool of dm_mpath_io structs so that we * can resubmit bios on error. */ mempool_t *mpio_pool; struct mutex work_mutex; }; /* * Context information attached to each bio we process. */ struct dm_mpath_io { struct pgpath *pgpath; size_t nr_bytes; }; typedef int (*action_fn) (struct pgpath *pgpath); #define MIN_IOS 256 /* Mempool size */ static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); /*----------------------------------------------- * Allocation routines *-----------------------------------------------*/ static struct pgpath *alloc_pgpath(void) { struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); if (pgpath) { pgpath->is_active = 1; INIT_DELAYED_WORK(&pgpath->activate_path, activate_path); } return pgpath; } static void free_pgpath(struct pgpath *pgpath) { kfree(pgpath); } static struct priority_group *alloc_priority_group(void) { struct priority_group *pg; pg = kzalloc(sizeof(*pg), GFP_KERNEL); if (pg) INIT_LIST_HEAD(&pg->pgpaths); return pg; } static void free_pgpaths(struct 
list_head *pgpaths, struct dm_target *ti) { struct pgpath *pgpath, *tmp; struct multipath *m = ti->private; list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { list_del(&pgpath->list); if (m->hw_handler_name) scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); dm_put_device(ti, pgpath->path.dev); free_pgpath(pgpath); } } static void free_priority_group(struct priority_group *pg, struct dm_target *ti) { struct path_selector *ps = &pg->ps; if (ps->type) { ps->type->destroy(ps); dm_put_path_selector(ps->type); } free_pgpaths(&pg->pgpaths, ti); kfree(pg); } static struct multipath *alloc_multipath(struct dm_target *ti) { struct multipath *m; m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { INIT_LIST_HEAD(&m->priority_groups); INIT_LIST_HEAD(&m->queued_ios); spin_lock_init(&m->lock); m->queue_io = 1; m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); return NULL; } m->ti = ti; ti->private = m; } return m; } static void free_multipath(struct multipath *m) { struct priority_group *pg, *tmp; list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { list_del(&pg->list); free_priority_group(pg, m->ti); } kfree(m->hw_handler_name); kfree(m->hw_handler_params); mempool_destroy(m->mpio_pool); kfree(m); } /*----------------------------------------------- * Path selection *-----------------------------------------------*/ static void __pg_init_all_paths(struct multipath *m) { struct pgpath *pgpath; unsigned long pg_init_delay = 0; m->pg_init_count++; m->pg_init_required = 0; if (m->pg_init_delay_retry) pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? 
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { /* Skip failed paths */ if (!pgpath->is_active) continue; if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, pg_init_delay)) m->pg_init_in_progress++; } } static void __switch_pg(struct multipath *m, struct pgpath *pgpath) { m->current_pg = pgpath->pg; /* Must we initialise the PG first, and queue I/O till it's ready? */ if (m->hw_handler_name) { m->pg_init_required = 1; m->queue_io = 1; } else { m->pg_init_required = 0; m->queue_io = 0; } m->pg_init_count = 0; } static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, size_t nr_bytes) { struct dm_path *path; path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); if (!path) return -ENXIO; m->current_pgpath = path_to_pgpath(path); if (m->current_pg != pg) __switch_pg(m, m->current_pgpath); return 0; } static void __choose_pgpath(struct multipath *m, size_t nr_bytes) { struct priority_group *pg; unsigned bypassed = 1; if (!m->nr_valid_paths) goto failed; /* Were we instructed to switch PG? */ if (m->next_pg) { pg = m->next_pg; m->next_pg = NULL; if (!__choose_path_in_pg(m, pg, nr_bytes)) return; } /* Don't change PG until it has no remaining paths */ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes)) return; /* * Loop through priority groups until we find a valid path. * First time we skip PGs marked 'bypassed'. * Second time we only try the ones we skipped. */ do { list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed == bypassed) continue; if (!__choose_path_in_pg(m, pg, nr_bytes)) return; } } while (bypassed--); failed: m->current_pgpath = NULL; m->current_pg = NULL; } /* * Check whether bios must be queued in the device-mapper core rather * than here in the target. * * m->lock must be held on entry. 
* * If m->queue_if_no_path and m->saved_queue_if_no_path hold the * same value then we are not between multipath_presuspend() * and multipath_resume() calls and we have no need to check * for the DMF_NOFLUSH_SUSPENDING flag. */ static int __must_push_back(struct multipath *m) { return (m->queue_if_no_path != m->saved_queue_if_no_path && dm_noflush_suspending(m->ti)); } static int map_io(struct multipath *m, struct request *clone, struct dm_mpath_io *mpio, unsigned was_queued) { int r = DM_MAPIO_REMAPPED; size_t nr_bytes = blk_rq_bytes(clone); unsigned long flags; struct pgpath *pgpath; struct block_device *bdev; spin_lock_irqsave(&m->lock, flags); /* Do we need to select a new pgpath? */ if (!m->current_pgpath || (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) __choose_pgpath(m, nr_bytes); pgpath = m->current_pgpath; if (was_queued) m->queue_size--; if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) { /* Queue for the daemon to resubmit */ list_add_tail(&clone->queuelist, &m->queued_ios); m->queue_size++; if ((m->pg_init_required && !m->pg_init_in_progress) || !m->queue_io) queue_work(kmultipathd, &m->process_queued_ios); pgpath = NULL; r = DM_MAPIO_SUBMITTED; } else if (pgpath) { bdev = pgpath->path.dev->bdev; clone->q = bdev_get_queue(bdev); clone->rq_disk = bdev->bd_disk; } else if (__must_push_back(m)) r = DM_MAPIO_REQUEUE; else r = -EIO; /* Failed */ mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, nr_bytes); spin_unlock_irqrestore(&m->lock, flags); return r; } /* * If we run out of usable paths, should we queue I/O or error it? 
 */
/*
 * Turn queueing of I/O when no path is available on or off.
 *
 * @queue_if_no_path: new value for m->queue_if_no_path.
 * @save_old_value:   when set, preserve the current setting in
 *                    saved_queue_if_no_path (used by multipath_presuspend()
 *                    so multipath_resume() can restore it); otherwise the
 *                    saved copy is overwritten with the new value as well.
 *
 * If queueing was just switched off while requests are still parked on
 * m->queued_ios, kick the dispatch worker to drain them.
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	/* Queueing disabled with work pending: resubmit what is queued. */
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

/*
 * Re-map every clone request currently parked on m->queued_ios.
 *
 * The queued list is spliced onto a private list under m->lock, then each
 * clone is pushed back through map_io() (was_queued == 1) with the lock
 * dropped, since map_io() takes the lock itself.
 */
static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	struct dm_mpath_io *mpio;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);
		mpio = info->ptr;

		r = map_io(m, clone, mpio, 1);
		if (r < 0) {
			/* Hard failure: end the request with an error. */
			mempool_free(mpio, m->mpio_pool);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			/* Hand back to dm core for later resubmission. */
			mempool_free(mpio, m->mpio_pool);
			dm_requeue_unmapped_request(clone);
		}
	}
}

/*
 * Work handler for m->process_queued_ios: decide whether the queued
 * requests can be dispatched yet, and start pg_init on the current
 * priority group first if it is still required.
 */
static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	/* Dispatch only once a usable path exists (or erroring is allowed). */
	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}
/* * An event is triggered whenever a path is taken out of use. * Includes path failure and PG bypass. */ static void trigger_event(struct work_struct *work) { struct multipath *m = container_of(work, struct multipath, trigger_event); dm_table_event(m->ti->table); } /*----------------------------------------------------------------- * Constructor/argument parsing: * <#multipath feature args> [<arg>]* * <#hw_handler args> [hw_handler [<arg>]*] * <#priority groups> * <initial priority group> * [<selector> <#selector args> [<arg>]* * <#paths> <#per-path selector args> * [<path> [<arg>]* ]+ ]+ *---------------------------------------------------------------*/ struct param { unsigned min; unsigned max; char *error; }; static int read_param(struct param *param, char *str, unsigned *v, char **error) { if (!str || (sscanf(str, "%u", v) != 1) || (*v < param->min) || (*v > param->max)) { *error = param->error; return -EINVAL; } return 0; } struct arg_set { unsigned argc; char **argv; }; static char *shift(struct arg_set *as) { char *r; if (as->argc) { as->argc--; r = *as->argv; as->argv++; return r; } return NULL; } static void consume(struct arg_set *as, unsigned n) { BUG_ON (as->argc < n); as->argc -= n; as->argv += n; } static int parse_path_selector(struct arg_set *as, struct priority_group *pg, struct dm_target *ti) { int r; struct path_selector_type *pst; unsigned ps_argc; static struct param _params[] = { {0, 1024, "invalid number of path selector args"}, }; pst = dm_get_path_selector(shift(as)); if (!pst) { ti->error = "unknown path selector type"; return -EINVAL; } r = read_param(_params, shift(as), &ps_argc, &ti->error); if (r) { dm_put_path_selector(pst); return -EINVAL; } if (ps_argc > as->argc) { dm_put_path_selector(pst); ti->error = "not enough arguments for path selector"; return -EINVAL; } r = pst->create(&pg->ps, ps_argc, as->argv); if (r) { dm_put_path_selector(pst); ti->error = "path selector constructor failed"; return r; } pg->ps.type = pst; consume(as, 
ps_argc); return 0; } static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, struct dm_target *ti) { int r; struct pgpath *p; struct multipath *m = ti->private; /* we need at least a path arg */ if (as->argc < 1) { ti->error = "no device given"; return ERR_PTR(-EINVAL); } p = alloc_pgpath(); if (!p) return ERR_PTR(-ENOMEM); r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), &p->path.dev); if (r) { ti->error = "error getting device"; goto bad; } if (m->hw_handler_name) { struct request_queue *q = bdev_get_queue(p->path.dev->bdev); r = scsi_dh_attach(q, m->hw_handler_name); if (r == -EBUSY) { /* * Already attached to different hw_handler, * try to reattach with correct one. */ scsi_dh_detach(q); r = scsi_dh_attach(q, m->hw_handler_name); } if (r < 0) { ti->error = "error attaching hardware handler"; dm_put_device(ti, p->path.dev); goto bad; } if (m->hw_handler_params) { r = scsi_dh_set_params(q, m->hw_handler_params); if (r < 0) { ti->error = "unable to set hardware " "handler parameters"; scsi_dh_detach(q); dm_put_device(ti, p->path.dev); goto bad; } } } r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); if (r) { dm_put_device(ti, p->path.dev); goto bad; } return p; bad: free_pgpath(p); return ERR_PTR(r); } static struct priority_group *parse_priority_group(struct arg_set *as, struct multipath *m) { static struct param _params[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; int r; unsigned i, nr_selector_args, nr_params; struct priority_group *pg; struct dm_target *ti = m->ti; if (as->argc < 2) { as->argc = 0; ti->error = "not enough priority group arguments"; return ERR_PTR(-EINVAL); } pg = alloc_priority_group(); if (!pg) { ti->error = "couldn't allocate priority group"; return ERR_PTR(-ENOMEM); } pg->m = m; r = parse_path_selector(as, pg, ti); if (r) goto bad; /* * read the paths */ r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); if (r) goto bad; r 
= read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); if (r) goto bad; nr_params = 1 + nr_selector_args; for (i = 0; i < pg->nr_pgpaths; i++) { struct pgpath *pgpath; struct arg_set path_args; if (as->argc < nr_params) { ti->error = "not enough path parameters"; r = -EINVAL; goto bad; } path_args.argc = nr_params; path_args.argv = as->argv; pgpath = parse_path(&path_args, &pg->ps, ti); if (IS_ERR(pgpath)) { r = PTR_ERR(pgpath); goto bad; } pgpath->pg = pg; list_add_tail(&pgpath->list, &pg->pgpaths); consume(as, nr_params); } return pg; bad: free_priority_group(pg, ti); return ERR_PTR(r); } static int parse_hw_handler(struct arg_set *as, struct multipath *m) { unsigned hw_argc; int ret; struct dm_target *ti = m->ti; static struct param _params[] = { {0, 1024, "invalid number of hardware handler args"}, }; if (read_param(_params, shift(as), &hw_argc, &ti->error)) return -EINVAL; if (!hw_argc) return 0; if (hw_argc > as->argc) { ti->error = "not enough arguments for hardware handler"; return -EINVAL; } m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); request_module("scsi_dh_%s", m->hw_handler_name); if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; ret = -EINVAL; goto fail; } if (hw_argc > 1) { char *p; int i, j, len = 4; for (i = 0; i <= hw_argc - 2; i++) len += strlen(as->argv[i]) + 1; p = m->hw_handler_params = kzalloc(len, GFP_KERNEL); if (!p) { ti->error = "memory allocation failed"; ret = -ENOMEM; goto fail; } j = sprintf(p, "%d", hw_argc - 1); for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) j = sprintf(p, "%s", as->argv[i]); } consume(as, hw_argc - 1); return 0; fail: kfree(m->hw_handler_name); m->hw_handler_name = NULL; return ret; } static int parse_features(struct arg_set *as, struct multipath *m) { int r; unsigned argc; struct dm_target *ti = m->ti; const char *param_name; static struct param _params[] = { {0, 5, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 
1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; r = read_param(_params, shift(as), &argc, &ti->error); if (r) return -EINVAL; if (!argc) return 0; if (argc > as->argc) { ti->error = "not enough arguments for features"; return -EINVAL; } do { param_name = shift(as); argc--; if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { r = queue_if_no_path(m, 1, 0); continue; } if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && (argc >= 1)) { r = read_param(_params + 1, shift(as), &m->pg_init_retries, &ti->error); argc--; continue; } if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && (argc >= 1)) { r = read_param(_params + 2, shift(as), &m->pg_init_delay_msecs, &ti->error); argc--; continue; } ti->error = "Unrecognised multipath feature request"; r = -EINVAL; } while (argc && !r); return r; } static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) { /* target parameters */ static struct param _params[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; int r; struct multipath *m; struct arg_set as; unsigned pg_count = 0; unsigned next_pg_num; as.argc = argc; as.argv = argv; m = alloc_multipath(ti); if (!m) { ti->error = "can't allocate multipath"; return -EINVAL; } r = parse_features(&as, m); if (r) goto bad; r = parse_hw_handler(&as, m); if (r) goto bad; r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); if (r) goto bad; r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); if (r) goto bad; if ((!m->nr_priority_groups && next_pg_num) || (m->nr_priority_groups && !next_pg_num)) { ti->error = "invalid initial priority group"; r = -EINVAL; goto bad; } /* parse the priority groups */ while (as.argc) { struct priority_group *pg; pg = parse_priority_group(&as, m); if (IS_ERR(pg)) { r = PTR_ERR(pg); goto bad; } m->nr_valid_paths += pg->nr_pgpaths; list_add_tail(&pg->list, &m->priority_groups); pg_count++; pg->pg_num 
= pg_count; if (!--next_pg_num) m->next_pg = pg; } if (pg_count != m->nr_priority_groups) { ti->error = "priority group count mismatch"; r = -EINVAL; goto bad; } ti->num_flush_requests = 1; ti->num_discard_requests = 1; return 0; bad: free_multipath(m); return r; } static void multipath_wait_for_pg_init_completion(struct multipath *m) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; add_wait_queue(&m->pg_init_wait, &wait); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); spin_lock_irqsave(&m->lock, flags); if (!m->pg_init_in_progress) { spin_unlock_irqrestore(&m->lock, flags); break; } spin_unlock_irqrestore(&m->lock, flags); io_schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&m->pg_init_wait, &wait); } static void flush_multipath_work(struct multipath *m) { flush_workqueue(kmpath_handlerd); multipath_wait_for_pg_init_completion(m); flush_workqueue(kmultipathd); flush_work_sync(&m->trigger_event); } static void multipath_dtr(struct dm_target *ti) { struct multipath *m = ti->private; flush_multipath_work(m); free_multipath(m); } /* * Map cloned requests */ static int multipath_map(struct dm_target *ti, struct request *clone, union map_info *map_context) { int r; struct dm_mpath_io *mpio; struct multipath *m = (struct multipath *) ti->private; mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); if (!mpio) /* ENOMEM, requeue */ return DM_MAPIO_REQUEUE; memset(mpio, 0, sizeof(*mpio)); map_context->ptr = mpio; clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; r = map_io(m, clone, mpio, 0); if (r < 0 || r == DM_MAPIO_REQUEUE) mempool_free(mpio, m->mpio_pool); return r; } /* * Take a path out of use. 
 */
/*
 * Take @pgpath out of use: tell the path selector, bump the failure
 * statistics, drop it as current path if needed, and notify userspace
 * via a uevent and a table event.  No-op if the path is already failed.
 * Always returns 0.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	/* Force a fresh path selection on the next map. */
	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
/*
 * Bring @pgpath back into service.  Returns 0 on success, -EINVAL if the
 * path selector cannot reinstate paths, or the selector's own error.
 * If this is the first valid path again and I/O is queued, the dispatch
 * worker is kicked; otherwise pg_init may be restarted for the path's PG.
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		/* First usable path again: re-select and drain the queue. */
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
/*
 * Apply @action (fail_path or reinstate_path) to every pgpath whose
 * underlying device is @dev.  Returns the result of the last matching
 * action, or -EINVAL if no path matched.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	/* Drop the cached choice so the next map re-selects a path/PG. */
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
/*
 * @pgstr is the 1-based PG number as text (from a "switch_group" message).
 * Clears all bypass flags and records the chosen PG in m->next_pg.
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	/* Walk to the pgnum'th group; range was validated above. */
	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
/*
 * Decide whether another pg_init attempt is allowed.  While under the
 * retry limit this re-arms m->pg_init_required and returns 0; once the
 * limit is exceeded it returns 1 and the caller should fail the path.
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

/*
 * Completion callback for scsi_dh_activate() (see activate_path()).
 * @data is the pgpath being activated; @errors is a SCSI_DH_* code.
 * Runs once per path activation; the last completion (pg_init_in_progress
 * reaching zero) re-enables I/O and wakes any suspend waiter.
 */
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
		/* fallthrough */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	/* Last activation finished: release queued I/O unless retrying. */
	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Work handler for pgpath->activate_path: kick off the hardware-handler
 * activation for this path's queue; pg_init_done() runs on completion.
 */
static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				pg_init_done, pgpath);
}

/*
 * end_io handling
 */
/*
 * Per-clone error disposition.  Returns 0 (complete), the original error
 * (pass through), -EIO (fail), or DM_ENDIO_REQUEUE (retry via dm core).
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	/* Target/media errors that retrying on another path cannot fix. */
	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * dm rq_end_io hook: resolve the clone's fate, inform the path selector
 * that the I/O finished, and release the per-request mpio bookkeeping.
 */
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	r  = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
* Note that if the freeze_bdev fails while suspending, the * queue_if_no_path state is lost - userspace should reset it. */ static void multipath_presuspend(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; queue_if_no_path(m, 0, 1); } static void multipath_postsuspend(struct dm_target *ti) { struct multipath *m = ti->private; mutex_lock(&m->work_mutex); flush_multipath_work(m); mutex_unlock(&m->work_mutex); } /* * Restore the queue_if_no_path setting. */ static void multipath_resume(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; unsigned long flags; spin_lock_irqsave(&m->lock, flags); m->queue_if_no_path = m->saved_queue_if_no_path; spin_unlock_irqrestore(&m->lock, flags); } /* * Info output has the following format: * num_multipath_feature_args [multipath_feature_args]* * num_handler_status_args [handler_status_args]* * num_groups init_group_number * [A|D|E num_ps_status_args [ps_status_args]* * num_paths num_selector_args * [path_dev A|F fail_count [selector_args]* ]+ ]+ * * Table output has the following format (identical to the constructor string): * num_feature_args [features_args]* * num_handler_args hw_handler [hw_handler_args]* * num_groups init_group_number * [priority selector-name num_ps_args [ps_args]* * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ */ static int multipath_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { int sz = 0; unsigned long flags; struct multipath *m = (struct multipath *) ti->private; struct priority_group *pg; struct pgpath *p; unsigned pg_num; char state; spin_lock_irqsave(&m->lock, flags); /* Features */ if (type == STATUSTYPE_INFO) DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); else { DMEMIT("%u ", m->queue_if_no_path + (m->pg_init_retries > 0) * 2 + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); if (m->queue_if_no_path) DMEMIT("queue_if_no_path "); if (m->pg_init_retries) 
DMEMIT("pg_init_retries %u ", m->pg_init_retries); if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); } if (!m->hw_handler_name || type == STATUSTYPE_INFO) DMEMIT("0 "); else DMEMIT("1 %s ", m->hw_handler_name); DMEMIT("%u ", m->nr_priority_groups); if (m->next_pg) pg_num = m->next_pg->pg_num; else if (m->current_pg) pg_num = m->current_pg->pg_num; else pg_num = (m->nr_priority_groups ? 1 : 0); DMEMIT("%u ", pg_num); switch (type) { case STATUSTYPE_INFO: list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed) state = 'D'; /* Disabled */ else if (pg == m->current_pg) state = 'A'; /* Currently Active */ else state = 'E'; /* Enabled */ DMEMIT("%c ", state); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, NULL, type, result + sz, maxlen - sz); else DMEMIT("0 "); DMEMIT("%u %u ", pg->nr_pgpaths, pg->ps.type->info_args); list_for_each_entry(p, &pg->pgpaths, list) { DMEMIT("%s %s %u ", p->path.dev->name, p->is_active ? 
"A" : "F", p->fail_count); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, &p->path, type, result + sz, maxlen - sz); } } break; case STATUSTYPE_TABLE: list_for_each_entry(pg, &m->priority_groups, list) { DMEMIT("%s ", pg->ps.type->name); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, NULL, type, result + sz, maxlen - sz); else DMEMIT("0 "); DMEMIT("%u %u ", pg->nr_pgpaths, pg->ps.type->table_args); list_for_each_entry(p, &pg->pgpaths, list) { DMEMIT("%s ", p->path.dev->name); if (pg->ps.type->status) sz += pg->ps.type->status(&pg->ps, &p->path, type, result + sz, maxlen - sz); } } break; } spin_unlock_irqrestore(&m->lock, flags); return 0; } static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) { int r = -EINVAL; struct dm_dev *dev; struct multipath *m = (struct multipath *) ti->private; action_fn action; mutex_lock(&m->work_mutex); if (dm_suspended(ti)) { r = -EBUSY; goto out; } if (argc == 1) { if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { r = queue_if_no_path(m, 1, 0); goto out; } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { r = queue_if_no_path(m, 0, 0); goto out; } } if (argc != 2) { DMWARN("Unrecognised multipath message received."); goto out; } if (!strnicmp(argv[0], MESG_STR("disable_group"))) { r = bypass_pg_num(m, argv[1], 1); goto out; } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { r = bypass_pg_num(m, argv[1], 0); goto out; } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { r = switch_pg_num(m, argv[1]); goto out; } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) action = reinstate_path; else if (!strnicmp(argv[0], MESG_STR("fail_path"))) action = fail_path; else { DMWARN("Unrecognised multipath message received."); goto out; } r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); if (r) { DMWARN("message: error getting device %s", argv[1]); goto out; } r = action_dev(m, dev, action); dm_put_device(ti, dev); out: 
mutex_unlock(&m->work_mutex); return r; } static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct multipath *m = (struct multipath *) ti->private; struct block_device *bdev = NULL; fmode_t mode = 0; unsigned long flags; int r = 0; spin_lock_irqsave(&m->lock, flags); if (!m->current_pgpath) __choose_pgpath(m, 0); if (m->current_pgpath) { bdev = m->current_pgpath->path.dev->bdev; mode = m->current_pgpath->path.dev->mode; } if (m->queue_io) r = -EAGAIN; else if (!bdev) r = -EIO; spin_unlock_irqrestore(&m->lock, flags); /* * Only pass ioctls through if the device sizes match exactly. */ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) r = scsi_verify_blk_ioctl(NULL, cmd); return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); } static int multipath_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct multipath *m = ti->private; struct priority_group *pg; struct pgpath *p; int ret = 0; list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(p, &pg->pgpaths, list) { ret = fn(ti, p->path.dev, ti->begin, ti->len, data); if (ret) goto out; } } out: return ret; } static int __pgpath_busy(struct pgpath *pgpath) { struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); return dm_underlying_device_busy(q); } /* * We return "busy", only when we can map I/Os but underlying devices * are busy (so even if we map I/Os now, the I/Os will wait on * the underlying queue). * In other words, if we want to kill I/Os or queue them inside us * due to map unavailability, we don't return "busy". Otherwise, * dm core won't give us the I/Os and we can't do what we want. 
*/ static int multipath_busy(struct dm_target *ti) { int busy = 0, has_active = 0; struct multipath *m = ti->private; struct priority_group *pg; struct pgpath *pgpath; unsigned long flags; spin_lock_irqsave(&m->lock, flags); /* Guess which priority_group will be used at next mapping time */ if (unlikely(!m->current_pgpath && m->next_pg)) pg = m->next_pg; else if (likely(m->current_pg)) pg = m->current_pg; else /* * We don't know which pg will be used at next mapping time. * We don't call __choose_pgpath() here to avoid to trigger * pg_init just by busy checking. * So we don't know whether underlying devices we will be using * at next mapping time are busy or not. Just try mapping. */ goto out; /* * If there is one non-busy active path at least, the path selector * will be able to select it. So we consider such a pg as not busy. */ busy = 1; list_for_each_entry(pgpath, &pg->pgpaths, list) if (pgpath->is_active) { has_active = 1; if (!__pgpath_busy(pgpath)) { busy = 0; break; } } if (!has_active) /* * No active path in this pg, so this pg won't be used and * the current_pg will be changed at next mapping time. * We need to try mapping to determine it. 
*/ busy = 0; out: spin_unlock_irqrestore(&m->lock, flags); return busy; } /*----------------------------------------------------------------- * Module setup *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, .map_rq = multipath_map, .rq_end_io = multipath_end_io, .presuspend = multipath_presuspend, .postsuspend = multipath_postsuspend, .resume = multipath_resume, .status = multipath_status, .message = multipath_message, .ioctl = multipath_ioctl, .iterate_devices = multipath_iterate_devices, .busy = multipath_busy, }; static int __init dm_multipath_init(void) { int r; /* allocate a slab for the dm_ios */ _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); if (!_mpio_cache) return -ENOMEM; r = dm_register_target(&multipath_target); if (r < 0) { DMERR("register failed %d", r); kmem_cache_destroy(_mpio_cache); return -EINVAL; } kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); return -ENOMEM; } /* * A separate workqueue is used to handle the device handlers * to avoid overloading existing workqueue. Overloading the * old workqueue would also create a bottleneck in the * path of the storage hardware device activation. 
*/ kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd", WQ_MEM_RECLAIM); if (!kmpath_handlerd) { DMERR("failed to create workqueue kmpath_handlerd"); destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); return -ENOMEM; } DMINFO("version %u.%u.%u loaded", multipath_target.version[0], multipath_target.version[1], multipath_target.version[2]); return r; } static void __exit dm_multipath_exit(void) { destroy_workqueue(kmpath_handlerd); destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); kmem_cache_destroy(_mpio_cache); } module_init(dm_multipath_init); module_exit(dm_multipath_exit); MODULE_DESCRIPTION(DM_NAME " multipath target"); MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
gueste/android_kernel_elephone_p8000
drivers/staging/vt6655/wroute.c
2243
5533
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: wroute.c * * Purpose: handle WMAC frame relay & filtering * * Author: Lyndon Chen * * Date: May 20, 2003 * * Functions: * ROUTEbRelay - Relay packet * * Revision History: * */ #include "mac.h" #include "tcrc.h" #include "rxtx.h" #include "wroute.h" #include "card.h" #include "baseband.h" /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ static int msglevel = MSG_LEVEL_INFO; //static int msglevel =MSG_LEVEL_DEBUG; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /* * Description: * Relay packet. 
 *      Relay (re-transmit) a received packet by queueing a copy on the
 *      AC0 DMA transmit ring.  Returns true if the packet was handed to
 *      the hardware.
 *
 * Parameters:
 *  In:
 *      pDevice     - adapter context
 *      pbySkbData  - rx packet skb data (starts with the Ethernet header)
 *      uDataLen    - length of pbySkbData in bytes
 *      uNodeIndex  - index into the node database for the destination
 *  Out:
 *      none
 *
 * Return Value: true if the packet was queued for relay; otherwise false
 *
 */
bool ROUTEbRelay(PSDevice pDevice, unsigned char *pbySkbData, unsigned int uDataLen, unsigned int uNodeIndex)
{
	PSMgmtObject    pMgmt = pDevice->pMgmt;
	PSTxDesc        pHeadTD, pLastTD;
	unsigned int cbFrameBodySize;
	unsigned int uMACfragNum;
	unsigned char byPktType;
	bool bNeedEncryption = false;
	SKeyItem        STempKey;
	PSKeyItem       pTransmitKey = NULL;
	unsigned int cbHeaderSize;
	unsigned int ii;
	unsigned char *pbyBSSID;

	/* Bail out early if no transmit descriptor is free on the AC0 ring. */
	if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Relay can't allocate TD1..\n");
		return false;
	}

	pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
	/* Mark descriptor as start + end of packet until fragmentation decides otherwise. */
	pHeadTD->m_td1TD1.byTCR = (TCR_EDP | TCR_STP);

	memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)pbySkbData, ETH_HLEN);

	cbFrameBodySize = uDataLen - ETH_HLEN;

	/*
	 * EtherType frames (type field > 1500) grow by 8 bytes --
	 * presumably the RFC1042/SNAP encapsulation header; TODO confirm.
	 */
	if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
		cbFrameBodySize += 8;
	}

	if (pDevice->bEncryptionEnable == true) {
		bNeedEncryption = true;

		/* get group key */
		pbyBSSID = pDevice->abyBroadcastAddr;
		if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
			pTransmitKey = NULL;
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG "Get GTK.\n");
		}
	}

	if (pDevice->bEnableHostWEP) {
		/*
		 * Host-managed WEP: build a temporary key from the per-node
		 * database entry (STempKey lives on the stack; it is only
		 * used for the duration of this transmit).
		 */
		if (uNodeIndex < MAX_NODE_NUM + 1) {
			pTransmitKey = &STempKey;
			pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
			pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
			pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
			pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
			pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
			memcpy(pTransmitKey->abyKey,
			       &pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
			       pTransmitKey->uKeyLength);
		}
	}

	uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);

	/* Not enough free descriptors for all fragments: give up. */
	if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
		return false;
	}

	byPktType = (unsigned char)pDevice->byPacketType;

	/* Select the transmit rate: fixed rate clamps by PHY type, otherwise use the per-node auto rate. */
	if (pDevice->bFixRate) {
		if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
			if (pDevice->uConnectionRate >= RATE_11M) {
				pDevice->wCurrentRate = RATE_11M;
			} else {
				pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
			}
		} else {
			if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
			    (pDevice->uConnectionRate <= RATE_6M)) {
				pDevice->wCurrentRate = RATE_6M;
			} else {
				if (pDevice->uConnectionRate >= RATE_54M)
					pDevice->wCurrentRate = RATE_54M;
				else
					pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
			}
		}
	} else {
		pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
	}

	/* CCK rates are always sent as 11b-type packets. */
	if (pDevice->wCurrentRate <= RATE_11M)
		byPktType = PK_TYPE_11B;

	vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
			    cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
			    &pDevice->sTxEthHeader, pbySkbData, pTransmitKey, uNodeIndex,
			    &uMACfragNum,
			    &cbHeaderSize
	    );

	if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
		/* Disable PS */
		MACbPSWakeup(pDevice->PortOffset);
	}

	pDevice->bPWBitOn = false;

	/*
	 * Hand each fragment's descriptor to the NIC.  The wmb() pairs
	 * ensure the descriptor contents are visible to the device before
	 * (and after) the ownership bit flips.
	 */
	pLastTD = pHeadTD;
	for (ii = 0; ii < uMACfragNum; ii++) {
		/* Poll Transmit the adapter */
		wmb();
		pHeadTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
		wmb();
		if (ii == (uMACfragNum - 1))
			pLastTD = pHeadTD;
		pHeadTD = pHeadTD->next;
	}

	/* No skb to release on completion for a relayed frame. */
	pLastTD->pTDInfo->skb = 0;
	pLastTD->pTDInfo->byFlags = 0;

	pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;

	/* Kick the AC0 DMA engine. */
	MACvTransmitAC0(pDevice->PortOffset);

	return true;
}
gpl-2.0
halaszk/Perseus-halaszk-universal5433
drivers/staging/vt6656/datarate.c
2243
11098
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: datarate.c * * Purpose: Handles the auto fallback & data rates functions * * Author: Lyndon Chen * * Date: July 17, 2002 * * Functions: * RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame * RATEvTxRateFallBack - Rate fallback Algorithm Implementaion * RATEuSetIE- Set rate IE field. 
* * Revision History: * */ #include "tmacro.h" #include "mac.h" #include "80211mgr.h" #include "bssdb.h" #include "datarate.h" #include "card.h" #include "baseband.h" #include "srom.h" #include "rf.h" /* static int msglevel = MSG_LEVEL_DEBUG; */ static int msglevel =MSG_LEVEL_INFO; const u8 acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C}; #define AUTORATE_TXOK_CNT 0x0400 #define AUTORATE_TXFAIL_CNT 0x0064 #define AUTORATE_TIMEOUT 10 void s_vResetCounter(PKnownNodeDB psNodeDBTable); void s_vResetCounter(PKnownNodeDB psNodeDBTable) { u8 ii; /* clear statistics counter for auto_rate */ for (ii = 0; ii <= MAX_RATE; ii++) { psNodeDBTable->uTxOk[ii] = 0; psNodeDBTable->uTxFail[ii] = 0; } } /*+ * * Routine Description: * Rate fallback Algorithm Implementaion * * Parameters: * In: * pDevice - Pointer to the adapter * psNodeDBTable - Pointer to Node Data Base * Out: * none * * Return Value: none * -*/ #define AUTORATE_TXCNT_THRESHOLD 20 #define AUTORATE_INC_THRESHOLD 30 /*+ * * Description: * Get RateIdx from the value in SuppRates IE or ExtSuppRates IE * * Parameters: * In: * u8 - Rate value in SuppRates IE or ExtSuppRates IE * Out: * none * * Return Value: RateIdx * -*/ u16 RATEwGetRateIdx( u8 byRate ) { u16 ii; /* erase BasicRate flag */ byRate = byRate & 0x7F; for (ii = 0; ii < MAX_RATE; ii ++) { if (acbyIERate[ii] == byRate) return ii; } return 0; } /*+ * * Description: * Parsing the highest basic & support rate in rate field of frame. * * Parameters: * In: * pDevice - Pointer to the adapter * pItemRates - Pointer to Rate field defined in 802.11 spec. * pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec. 
 *  Out:
 *      pwMaxBasicRate  - Maximum Basic Rate
 *      pwMaxSuppRate   - Maximum Supported Rate
 *      pwSuppRate      - Bitmap of supported rate indices
 *      pbyTopCCKRate   - Maximum Basic Rate in CCK mode
 *      pbyTopOFDMRate  - Maximum Basic Rate in OFDM mode
 *
 * Return Value: none
 *
-*/
void RATEvParseMaxRate(struct vnt_private *pDevice,
	PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pItemExtRates,
	int bUpdateBasicRate, u16 *pwMaxBasicRate, u16 *pwMaxSuppRate,
	u16 *pwSuppRate, u8 *pbyTopCCKRate, u8 *pbyTopOFDMRate)
{
	int ii;
	u8 byHighSuppRate = 0, byRate = 0;
	u16 wOldBasicRate = pDevice->wBasicRate;
	u32 uRateLen;

	if (pItemRates == NULL)
		return;

	*pwSuppRate = 0;
	uRateLen = pItemRates->len;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen);

	/* Clamp the IE length to what the current band can carry. */
	if (pDevice->byBBType != BB_TYPE_11B) {
		if (uRateLen > WLAN_RATES_MAXLEN)
			uRateLen = WLAN_RATES_MAXLEN;
	} else {
		if (uRateLen > WLAN_RATES_MAXLEN_11B)
			uRateLen = WLAN_RATES_MAXLEN_11B;
	}

	for (ii = 0; ii < uRateLen; ii++) {
		byRate = (u8)(pItemRates->abyRates[ii]);
		if (WLAN_MGMT_IS_BASICRATE(byRate) && (bUpdateBasicRate == true)) {
			/*
			 * add to basic rate set, update pDevice->byTopCCKBasicRate and
			 * pDevice->byTopOFDMBasicRate
			 */
			CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
		}
		byRate = (u8)(pItemRates->abyRates[ii]&0x7F);
		if (byHighSuppRate == 0)
			byHighSuppRate = byRate;
		if (byRate > byHighSuppRate)
			byHighSuppRate = byRate;
		*pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
	}

	/* Extended Supported Rates IE is only meaningful outside pure 11b. */
	if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
	    (pDevice->byBBType != BB_TYPE_11B)) {
		unsigned int uExtRateLen = pItemExtRates->len;

		if (uExtRateLen > WLAN_RATES_MAXLEN)
			uExtRateLen = WLAN_RATES_MAXLEN;

		for (ii = 0; ii < uExtRateLen; ii++) {
			byRate = (u8)(pItemExtRates->abyRates[ii]);
			/* select highest basic rate */
			if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
				/*
				 * add to basic rate set, update pDevice->byTopCCKBasicRate and
				 * pDevice->byTopOFDMBasicRate
				 */
				CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
			}
			byRate = (u8)(pItemExtRates->abyRates[ii]&0x7F);
			if (byHighSuppRate == 0)
				byHighSuppRate = byRate;
			if (byRate > byHighSuppRate)
				byHighSuppRate = byRate;
			*pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
			/* DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", RATEwGetRateIdx(byRate), byRate)); */
		}
	}

	/* 11g-mixed with an OFDM basic rate is promoted to pure-11g framing. */
	if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice)) {
		pDevice->byPacketType = PK_TYPE_11GA;
	}

	*pbyTopCCKRate = pDevice->byTopCCKBasicRate;
	*pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
	*pwMaxSuppRate = RATEwGetRateIdx(byHighSuppRate);

	if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB))
		*pwMaxBasicRate = pDevice->byTopCCKBasicRate;
	else
		*pwMaxBasicRate = pDevice->byTopOFDMBasicRate;

	/* Re-program response-info registers only if the basic rate set changed. */
	if (wOldBasicRate != pDevice->wBasicRate)
		CARDvSetRSPINF((void *)pDevice, pDevice->byBBType);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}

/*+
 *
 * Routine Description:
 *      Rate fallback Algorithm Implementaion
 *
 * Parameters:
 *  In:
 *      pDevice         - Pointer to the adapter
 *      psNodeDBTable   - Pointer to Node Data Base
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
/* NOTE(review): identical redefinitions of the two thresholds above -- benign but redundant. */
#define AUTORATE_TXCNT_THRESHOLD        20
#define AUTORATE_INC_THRESHOLD          30

void RATEvTxRateFallBack(struct vnt_private *pDevice, PKnownNodeDB psNodeDBTable)
{
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	u16 wIdxDownRate = 0;
	int ii;
	/* Rates eligible for auto-rate selection; 11a-only rates 6M/9M (idx 4,5) are excluded. */
	int bAutoRate[MAX_RATE] = {true, true, true, true, false, false, true, true, true, true, true, true};
	/* Nominal throughput per rate (scaled Mbit/s x10), weighted below by observed success ratios. */
	u32 dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
	u32 dwThroughput = 0;
	u16 wIdxUpRate = 0;
	u32 dwTxDiff = 0;

	if (pMgmt->eScanState != WMAC_NO_SCANNING)
		return; /* Don't do Fallback when scanning Channel */

	psNodeDBTable->uTimeCount++;

	/* Slot MAX_RATE holds the aggregate ok/fail totals. */
	if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
		dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];

	/* Not enough traffic, failures or elapsed time yet: nothing to adapt. */
	if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
	    (dwTxDiff < AUTORATE_TXFAIL_CNT) &&
	    (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
		return;
	}

	if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) {
		psNodeDBTable->uTimeCount = 0;
	}

	/* wIdxUpRate = highest supported auto-eligible rate; prune unsupported rates. */
	for (ii = 0; ii < MAX_RATE; ii++) {
		if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
			if (bAutoRate[ii] == true) {
				wIdxUpRate = (u16) ii;
			}
		} else {
			bAutoRate[ii] = false;
		}
	}

	/* Weight nominal throughputs by the observed success ratio (CCK failures count x4). */
	for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
		if ((psNodeDBTable->uTxOk[ii] != 0) ||
		    (psNodeDBTable->uTxFail[ii] != 0)) {
			dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
			if (ii < RATE_11M) {
				psNodeDBTable->uTxFail[ii] *= 4;
			}
			dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n",
			ii, (int)psNodeDBTable->uTxOk[ii], (int)psNodeDBTable->uTxFail[ii], (int)dwThroughputTbl[ii]);
	}

	/* Fall back to whichever lower rate now promises the best effective throughput. */
	dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
	wIdxDownRate = psNodeDBTable->wTxDataRate;
	for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
		ii--;
		if ((dwThroughputTbl[ii] > dwThroughput) &&
		    (bAutoRate[ii]==true)) {
			dwThroughput = dwThroughputTbl[ii];
			wIdxDownRate = (u16) ii;
		}
	}
	psNodeDBTable->wTxDataRate = wIdxDownRate;

	/* If the overall success ratio is good (>4:1), jump straight up to the top rate. */
	if (psNodeDBTable->uTxOk[MAX_RATE]) {
		if (psNodeDBTable->uTxOk[MAX_RATE] > (psNodeDBTable->uTxFail[MAX_RATE] * 4)) {
			psNodeDBTable->wTxDataRate = wIdxUpRate;
		}
	} else { /* adhoc, if uTxOk(total) == 0 & uTxFail(total) == 0 */
		if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
			psNodeDBTable->wTxDataRate = wIdxUpRate;
	}

	/* 11a has no CCK rates: clamp anything at/below 11M to 6M. */
	if (pDevice->byBBType == BB_TYPE_11A) {
		if (psNodeDBTable->wTxDataRate <= RATE_11M)
			psNodeDBTable->wTxDataRate = RATE_6M;
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uTxOk[MAX_RATE] %d, uTxFail[MAX_RATE]:%d\n",(int)psNodeDBTable->uTxOk[MAX_RATE], (int)psNodeDBTable->uTxFail[MAX_RATE]);

	s_vResetCounter(psNodeDBTable);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n",
		(int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate);

	return;
}

/*+
 *
 * Description:
 *      This routine is used to assemble available Rate IE.
 *      Copies every rate from pSrcRates that is known to this driver
 *      (present in acbyIERate) into pDstRates, preserving the basic-rate
 *      flag bit, and returns the number of rates copied.
 *
 * Parameters:
 *  In:
 *      pSrcRates - source Supported Rates IE
 *      pDstRates - destination IE to fill
 *      uRateLen  - number of driver rate indices to consider
 *  Out:
 *      none
 *
 * Return Value: number of rates written to pDstRates
 *
-*/
u8 RATEuSetIE(PWLAN_IE_SUPP_RATES pSrcRates, PWLAN_IE_SUPP_RATES pDstRates, unsigned int uRateLen)
{
	unsigned int ii, uu, uRateCnt = 0;

	if ((pSrcRates == NULL) || (pDstRates == NULL))
		return 0;

	if (pSrcRates->len == 0)
		return 0;

	for (ii = 0; ii < uRateLen; ii++) {
		for (uu = 0; uu < pSrcRates->len; uu++) {
			/* compare with the basic-rate flag masked off */
			if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
				pDstRates->abyRates[uRateCnt ++] = pSrcRates->abyRates[uu];
				break;
			}
		}
	}

	return (u8)uRateCnt;
}
gpl-2.0
XperianPro/android_kernel_xiaomi_aries-port
drivers/md/dm-table.c
2755
37033
/* * Copyright (C) 2001 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/atomic.h> #define DM_MSG_PREFIX "table" #define MAX_DEPTH 16 #define NODE_SIZE L1_CACHE_BYTES #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) /* * The table has always exactly one reference from either mapped_device->map * or hash_cell->new_map. This reference is not counted in table->holders. * A pair of dm_create_table/dm_destroy_table functions is used for table * creation/destruction. * * Temporary references from the other code increase table->holders. A pair * of dm_table_get/dm_table_put functions is used to manipulate it. * * When the table is about to be destroyed, we wait for table->holders to * drop to zero. */ struct dm_table { struct mapped_device *md; atomic_t holders; unsigned type; /* btree table */ unsigned int depth; unsigned int counts[MAX_DEPTH]; /* in nodes */ sector_t *index[MAX_DEPTH]; unsigned int num_targets; unsigned int num_allocated; sector_t *highs; struct dm_target *targets; struct target_type *immutable_target_type; unsigned integrity_supported:1; unsigned singleton:1; /* * Indicates the rw permissions for the new logical * device. This should be a combination of FMODE_READ * and FMODE_WRITE. 
 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.  Returns (sector_t)-1 when the node
 * index is out of range for that level.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

/*
 * Overflow-checked zeroing vmalloc: returns NULL (rather than wrapping)
 * when nmemb * elem_size would overflow unsigned long.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	/* Preserve any targets already loaded into the old arrays. */
	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	/* Unused high entries are set to all-ones ((sector_t)-1). */
	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

/*
 * Allocate and initialise an empty table.  num_targets is rounded up to
 * a whole number of btree leaf nodes (KEYS_PER_NODE) so the index can
 * be built without reallocation.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);
	atomic_set(&t->holders, 0);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

/*
 * Release leftover device-list entries.  Any entry still present here
 * indicates a missing dm_put_device() call, hence the warning.
 */
static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("dm_table_destroy: dm_put_device call missing for %s",
		       dd->dm_dev.name);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* Wait (by polling) until all temporary references are dropped. */
	while (atomic_read(&t->holders))
		msleep(1);
	smp_mb();

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices);
dm_free_md_mempools(t->mempools); kfree(t); } void dm_table_get(struct dm_table *t) { atomic_inc(&t->holders); } EXPORT_SYMBOL(dm_table_get); void dm_table_put(struct dm_table *t) { if (!t) return; smp_mb__before_atomic_dec(); atomic_dec(&t->holders); } EXPORT_SYMBOL(dm_table_put); /* * Checks to see if we need to extend highs or targets. */ static inline int check_space(struct dm_table *t) { if (t->num_targets >= t->num_allocated) return alloc_targets(t, t->num_allocated * 2); return 0; } /* * See if we've already got a device in the list. */ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) { struct dm_dev_internal *dd; list_for_each_entry (dd, l, list) if (dd->dm_dev.bdev->bd_dev == dev) return dd; return NULL; } /* * Open a device so we can use it as a map destination. */ static int open_dev(struct dm_dev_internal *d, dev_t dev, struct mapped_device *md) { static char *_claim_ptr = "I belong to device-mapper"; struct block_device *bdev; int r; BUG_ON(d->dm_dev.bdev); bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr); if (IS_ERR(bdev)) return PTR_ERR(bdev); r = bd_link_disk_holder(bdev, dm_disk(md)); if (r) { blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL); return r; } d->dm_dev.bdev = bdev; return 0; } /* * Close a device that we've been using. */ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) { if (!d->dm_dev.bdev) return; bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md)); blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL); d->dm_dev.bdev = NULL; } /* * If possible, this checks an area of a destination device is invalid. 
*/ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q; struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; sector_t dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; unsigned short logical_block_size_sectors = limits->logical_block_size >> SECTOR_SHIFT; char b[BDEVNAME_SIZE]; /* * Some devices exist without request functions, * such as loop devices not yet bound to backing files. * Forbid the use of such devices. */ q = bdev_get_queue(bdev); if (!q || !q->make_request_fn) { DMWARN("%s: %s is not yet initialised: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (!dev_size) return 0; if ((start >= dev_size) || (start + len > dev_size)) { DMWARN("%s: %s too small for target: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (logical_block_size_sectors <= 1) return 0; if (start & (logical_block_size_sectors - 1)) { DMWARN("%s: start=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)start, limits->logical_block_size, bdevname(bdev, b)); return 1; } if (len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)len, limits->logical_block_size, bdevname(bdev, b)); return 1; } return 0; } /* * This upgrades the mode on an already open dm_dev, being * careful to leave things as they were if we fail to reopen the * device and not to touch the existing bdev field in case * it is accessed concurrently inside dm_table_any_congested(). 
*/ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) { int r; struct dm_dev_internal dd_new, dd_old; dd_new = dd_old = *dd; dd_new.dm_dev.mode |= new_mode; dd_new.dm_dev.bdev = NULL; r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); if (r) return r; dd->dm_dev.mode |= new_mode; close_dev(&dd_old, md); return 0; } /* * Add a device to the list, or just increment the usage count if * it's already present. */ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, struct dm_dev **result) { int r; dev_t uninitialized_var(dev); struct dm_dev_internal *dd; unsigned int major, minor; struct dm_table *t = ti->table; char dummy; BUG_ON(!t); if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { /* Extract the major/minor numbers */ dev = MKDEV(major, minor); if (MAJOR(dev) != major || MINOR(dev) != minor) return -EOVERFLOW; } else { /* convert the path to a device */ struct block_device *bdev = lookup_bdev(path); if (IS_ERR(bdev)) return PTR_ERR(bdev); dev = bdev->bd_dev; bdput(bdev); } dd = find_device(&t->devices, dev); if (!dd) { dd = kmalloc(sizeof(*dd), GFP_KERNEL); if (!dd) return -ENOMEM; dd->dm_dev.mode = mode; dd->dm_dev.bdev = NULL; if ((r = open_dev(dd, dev, t->md))) { kfree(dd); return r; } format_dev_t(dd->dm_dev.name, dev); atomic_set(&dd->count, 0); list_add(&dd->list, &t->devices); } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) { r = upgrade_mode(dd, mode, t->md); if (r) return r; } atomic_inc(&dd->count); *result = &dd->dm_dev; return 0; } EXPORT_SYMBOL(dm_get_device); int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev); char b[BDEVNAME_SIZE]; if (unlikely(!q)) { DMWARN("%s: Cannot set limits for nonexistent device %s", dm_device_name(ti->table->md), bdevname(bdev, b)); return 0; } if 
(bdev_stack_limits(limits, bdev, start) < 0) DMWARN("%s: adding target device %s caused an alignment inconsistency: " "physical_block_size=%u, logical_block_size=%u, " "alignment_offset=%u, start=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), q->limits.physical_block_size, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); /* * Check if merge fn is supported. * If not we'll force DM to use PAGE_SIZE or * smaller I/O, just to be safe. */ if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9)); return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); /* * Decrement a device's use count and remove it if necessary. */ void dm_put_device(struct dm_target *ti, struct dm_dev *d) { struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal, dm_dev); if (atomic_dec_and_test(&dd->count)) { close_dev(dd, ti->table->md); list_del(&dd->list); kfree(dd); } } EXPORT_SYMBOL(dm_put_device); /* * Checks to see if the target joins onto the end of the table. */ static int adjoin(struct dm_table *table, struct dm_target *ti) { struct dm_target *prev; if (!table->num_targets) return !ti->begin; prev = &table->targets[table->num_targets - 1]; return (ti->begin == (prev->begin + prev->len)); } /* * Used to dynamically allocate the arg array. */ static char **realloc_argv(unsigned *array_size, char **old_argv) { char **argv; unsigned new_size; new_size = *array_size ? *array_size * 2 : 64; argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); if (argv) { memcpy(argv, old_argv, *array_size * sizeof(*argv)); *array_size = new_size; } kfree(old_argv); return argv; } /* * Destructively splits up the argument list to pass to ctr. 
*/ int dm_split_args(int *argc, char ***argvp, char *input) { char *start, *end = input, *out, **argv = NULL; unsigned array_size = 0; *argc = 0; if (!input) { *argvp = NULL; return 0; } argv = realloc_argv(&array_size, argv); if (!argv) return -ENOMEM; while (1) { /* Skip whitespace */ start = skip_spaces(end); if (!*start) break; /* success, we hit the end */ /* 'out' is used to remove any back-quotes */ end = out = start; while (*end) { /* Everything apart from '\0' can be quoted */ if (*end == '\\' && *(end + 1)) { *out++ = *(end + 1); end += 2; continue; } if (isspace(*end)) break; /* end of token */ *out++ = *end++; } /* have we already filled the array ? */ if ((*argc + 1) > array_size) { argv = realloc_argv(&array_size, argv); if (!argv) return -ENOMEM; } /* we know this is whitespace */ if (*end) end++; /* terminate the string and put it in the array */ *out = '\0'; argv[*argc] = start; (*argc)++; } *argvp = argv; return 0; } /* * Impose necessary and sufficient conditions on a devices's table such * that any incoming bio which respects its logical_block_size can be * processed successfully. If it falls across the boundary between * two or more targets, the size of each piece it gets split into must * be compatible with the logical_block_size of the target processing it. */ static int validate_hardware_logical_block_alignment(struct dm_table *table, struct queue_limits *limits) { /* * This function uses arithmetic modulo the logical_block_size * (in units of 512-byte sectors). */ unsigned short device_logical_block_size_sects = limits->logical_block_size >> SECTOR_SHIFT; /* * Offset of the start of the next table entry, mod logical_block_size. */ unsigned short next_target_start = 0; /* * Given an aligned bio that extends beyond the end of a * target, how many sectors must the next target handle? 
*/ unsigned short remaining = 0; struct dm_target *uninitialized_var(ti); struct queue_limits ti_limits; unsigned i = 0; /* * Check each entry in the table in turn. */ while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); blk_set_stacking_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); /* * If the remaining sectors fall entirely within this * table entry are they compatible with its logical_block_size? */ if (remaining < ti->len && remaining & ((ti_limits.logical_block_size >> SECTOR_SHIFT) - 1)) break; /* Error */ next_target_start = (unsigned short) ((next_target_start + ti->len) & (device_logical_block_size_sects - 1)); remaining = next_target_start ? device_logical_block_size_sects - next_target_start : 0; } if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " "not aligned to h/w logical block size %u", dm_device_name(table->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, limits->logical_block_size); return -EINVAL; } return 0; } int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params) { int r = -EINVAL, argc; char **argv; struct dm_target *tgt; if (t->singleton) { DMERR("%s: target type %s must appear alone in table", dm_device_name(t->md), t->targets->type->name); return -EINVAL; } if ((r = check_space(t))) return r; tgt = t->targets + t->num_targets; memset(tgt, 0, sizeof(*tgt)); if (!len) { DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } tgt->type = dm_get_target_type(type); if (!tgt->type) { DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); return -EINVAL; } if (dm_target_needs_singleton(tgt->type)) { if (t->num_targets) { DMERR("%s: target type %s must appear alone in table", dm_device_name(t->md), type); return -EINVAL; } t->singleton = 1; } if (dm_target_always_writeable(tgt->type) && 
!(t->mode & FMODE_WRITE)) { DMERR("%s: target type %s may not be included in read-only tables", dm_device_name(t->md), type); return -EINVAL; } if (t->immutable_target_type) { if (t->immutable_target_type != tgt->type) { DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_device_name(t->md), t->immutable_target_type->name); return -EINVAL; } } else if (dm_target_is_immutable(tgt->type)) { if (t->num_targets) { DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_device_name(t->md), tgt->type->name); return -EINVAL; } t->immutable_target_type = tgt->type; } tgt->table = t; tgt->begin = start; tgt->len = len; tgt->error = "Unknown error"; /* * Does this target adjoin the previous one ? */ if (!adjoin(t, tgt)) { tgt->error = "Gap in table"; r = -EINVAL; goto bad; } r = dm_split_args(&argc, &argv, params); if (r) { tgt->error = "couldn't split parameters (insufficient memory)"; goto bad; } r = tgt->type->ctr(tgt, argc, argv); kfree(argv); if (r) goto bad; t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; if (!tgt->num_discard_requests && tgt->discards_supported) DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.", dm_device_name(t->md), type); return 0; bad: DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); dm_put_target_type(tgt->type); return r; } /* * Target argument parsing helpers. 
*/

/*
 * Shift one argument off @arg_set, parse it as an unsigned int and range-check
 * it against arg->min/arg->max.  When @grouped is set, the parsed value is
 * additionally required to be <= the number of arguments still remaining
 * (it names a group of following arguments).  On any failure *error is set
 * to arg->error and -EINVAL is returned; on success *value holds the result.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
	char dummy;

	/* "%u%c" + != 1 rejects trailing garbage after the number */
	if (!arg_str ||
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

/* Read and validate a single unsigned argument. */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

/* As dm_read_arg(), but the value counts a group of following arguments. */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

/*
 * Pop the next argument string off @as, or return NULL when none remain.
 * Advances as->argv and decrements as->argc.
 */
const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

/*
 * Discard @num_args arguments from @as.  Callers must already have
 * validated the count; fewer remaining arguments is a programming error
 * (BUG_ON), not a runtime condition.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

/*
 * Decide whether this table is bio-based or request-based from its targets.
 * Mixing the two kinds in one table is rejected.
 */
static int dm_table_set_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
			DMWARN("table load rejected: including"
			       " non-request-stackable devices");
return -EINVAL; } } /* * Request-based dm supports only tables that have a single target now. * To support multiple targets, request splitting support is needed, * and that needs lots of changes in the block-layer. * (e.g. request completion process for partial completion.) */ if (t->num_targets > 1) { DMWARN("Request-based dm doesn't support multiple targets yet"); return -EINVAL; } t->type = DM_TYPE_REQUEST_BASED; return 0; } unsigned dm_table_get_type(struct dm_table *t) { return t->type; } struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) { return t->immutable_target_type; } bool dm_table_request_based(struct dm_table *t) { return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; } int dm_table_alloc_md_mempools(struct dm_table *t) { unsigned type = dm_table_get_type(t); if (unlikely(type == DM_TYPE_NONE)) { DMWARN("no table type is set, can't allocate mempools"); return -EINVAL; } t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); if (!t->mempools) return -ENOMEM; return 0; } void dm_table_free_md_mempools(struct dm_table *t) { dm_free_md_mempools(t->mempools); t->mempools = NULL; } struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) { return t->mempools; } static int setup_indexes(struct dm_table *t) { int i; unsigned int total = 0; sector_t *indexes; /* allocate the space for *all* the indexes */ for (i = t->depth - 2; i >= 0; i--) { t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); total += t->counts[i]; } indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); if (!indexes) return -ENOMEM; /* set up internal nodes, bottom-up */ for (i = t->depth - 2; i >= 0; i--) { t->index[i] = indexes; indexes += (KEYS_PER_NODE * t->counts[i]); setup_btree_index(i, t); } return 0; } /* * Builds the btree to index the map. */ static int dm_table_build_index(struct dm_table *t) { int r = 0; unsigned int leaf_nodes; /* how many indexes will the btree have ? 
*/ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); /* leaf layer has already been set up */ t->counts[t->depth - 1] = leaf_nodes; t->index[t->depth - 1] = t->highs; if (t->depth >= 2) r = setup_indexes(t); return r; } /* * Get a disk whose integrity profile reflects the table's profile. * If %match_all is true, all devices' profiles must match. * If %match_all is false, all devices must at least have an * allocated integrity profile; but uninitialized is ok. * Returns NULL if integrity support was inconsistent or unavailable. */ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t, bool match_all) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; struct gendisk *prev_disk = NULL, *template_disk = NULL; list_for_each_entry(dd, devices, list) { template_disk = dd->dm_dev.bdev->bd_disk; if (!blk_get_integrity(template_disk)) goto no_integrity; if (!match_all && !blk_integrity_is_initialized(template_disk)) continue; /* skip uninitialized profiles */ else if (prev_disk && blk_integrity_compare(prev_disk, template_disk) < 0) goto no_integrity; prev_disk = template_disk; } return template_disk; no_integrity: if (prev_disk) DMWARN("%s: integrity not set: %s and %s profile mismatch", dm_device_name(t->md), prev_disk->disk_name, template_disk->disk_name); return NULL; } /* * Register the mapped device for blk_integrity support if * the underlying devices have an integrity profile. But all devices * may not have matching profiles (checking all devices isn't reliable * during table load because this table may use other DM device(s) which * must be resumed before they will have an initialized integity profile). 
* Stacked DM devices force a 2 stage integrity profile validation: * 1 - during load, validate all initialized integrity profiles match * 2 - during resume, validate all integrity profiles match */ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) { struct gendisk *template_disk = NULL; template_disk = dm_table_get_integrity_disk(t, false); if (!template_disk) return 0; if (!blk_integrity_is_initialized(dm_disk(md))) { t->integrity_supported = 1; return blk_integrity_register(dm_disk(md), NULL); } /* * If DM device already has an initalized integrity * profile the new profile should not conflict. */ if (blk_integrity_is_initialized(template_disk) && blk_integrity_compare(dm_disk(md), template_disk) < 0) { DMWARN("%s: conflict with existing integrity profile: " "%s profile mismatch", dm_device_name(t->md), template_disk->disk_name); return 1; } /* Preserve existing initialized integrity profile */ t->integrity_supported = 1; return 0; } /* * Prepares the table for use by building the indices, * setting the type, and allocating mempools. */ int dm_table_complete(struct dm_table *t) { int r; r = dm_table_set_type(t); if (r) { DMERR("unable to set table type"); return r; } r = dm_table_build_index(t); if (r) { DMERR("unable to build btrees"); return r; } r = dm_table_prealloc_integrity(t, t->md); if (r) { DMERR("could not register integrity profile."); return r; } r = dm_table_alloc_md_mempools(t); if (r) DMERR("unable to allocate mempools"); return r; } static DEFINE_MUTEX(_event_lock); void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context) { mutex_lock(&_event_lock); t->event_fn = fn; t->event_context = context; mutex_unlock(&_event_lock); } void dm_table_event(struct dm_table *t) { /* * You can no longer call dm_table_event() from interrupt * context, use a bottom half instead. 
*/
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

/*
 * Total mapped size in sectors: highs[] stores each target's inclusive end
 * sector, so the last entry + 1 is the device length.  Empty table => 0.
 */
sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

/* Return target @index, or NULL when the index is out of range. */
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	/* Walk each btree level, picking the child whose key covers @sector. */
	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_stacking_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		/* Start each target from pristine stacking limits. */
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
*/ if (blk_stack_limits(limits, &ti_limits, 0) < 0) DMWARN("%s: adding target device " "(start sect %llu len %llu) " "caused an alignment inconsistency", dm_device_name(table->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); } return validate_hardware_logical_block_alignment(table, limits); } /* * Set the integrity profile for this device if all devices used have * matching profiles. We're quite deep in the resume path but still * don't know if all devices (particularly DM devices this device * may be stacked on) have matching profiles. Even if the profiles * don't match we have no way to fail (to resume) at this point. */ static void dm_table_set_integrity(struct dm_table *t) { struct gendisk *template_disk = NULL; if (!blk_get_integrity(dm_disk(t->md))) return; template_disk = dm_table_get_integrity_disk(t, true); if (template_disk) blk_integrity_register(dm_disk(t->md), blk_get_integrity(template_disk)); else if (blk_integrity_is_initialized(dm_disk(t->md))) DMWARN("%s: device no longer has a valid integrity profile", dm_device_name(t->md)); else DMWARN("%s: unable to establish an integrity profile", dm_device_name(t->md)); } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { unsigned flush = (*(unsigned *)data); struct request_queue *q = bdev_get_queue(dev->bdev); return q && (q->flush_flags & flush); } static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) { struct dm_target *ti; unsigned i = 0; /* * Require at least one underlying device to support flushes. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting flushes must provide. 
*/ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_flush_requests) continue; if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_flush_capable, &flush)) return 1; } return 0; } static bool dm_table_discard_zeroes_data(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* Ensure that all targets supports discard_zeroes_data. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (ti->discard_zeroes_data_unsupported) return 0; } return 1; } static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && blk_queue_nonrot(q); } static bool dm_table_is_nonrot(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* Ensure that all underlying device are non-rotational. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->type->iterate_devices || !ti->type->iterate_devices(ti, device_is_nonrot, NULL)) return 0; } return 1; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { unsigned flush = 0; /* * Copy table's limits to the DM device's request_queue */ q->limits = *limits; if (!dm_table_supports_discards(t)) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); else queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); if (dm_table_supports_flush(t, REQ_FLUSH)) { flush |= REQ_FLUSH; if (dm_table_supports_flush(t, REQ_FUA)) flush |= REQ_FUA; } blk_queue_flush(q, flush); if (!dm_table_discard_zeroes_data(t)) q->limits.discard_zeroes_data = 0; if (dm_table_is_nonrot(t)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); dm_table_set_integrity(t); /* * QUEUE_FLAG_STACKABLE must be set after all queue settings are * visible to other CPUs because, once the flag is set, incoming bios * are processed by request-based dm, which refers to 
the queue * settings. * Until the flag set, bios are passed to bio-based dm and queued to * md->deferred where queue settings are not needed yet. * Those bios are passed to request-based dm at the resume time. */ smp_mb(); if (dm_table_request_based(t)) queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) { return t->num_targets; } struct list_head *dm_table_get_devices(struct dm_table *t) { return &t->devices; } fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } EXPORT_SYMBOL(dm_table_get_mode); static void suspend_targets(struct dm_table *t, unsigned postsuspend) { int i = t->num_targets; struct dm_target *ti = t->targets; while (i--) { if (postsuspend) { if (ti->type->postsuspend) ti->type->postsuspend(ti); } else if (ti->type->presuspend) ti->type->presuspend(ti); ti++; } } void dm_table_presuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, 0); } void dm_table_postsuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, 1); } int dm_table_resume_targets(struct dm_table *t) { int i, r = 0; for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (!ti->type->preresume) continue; r = ti->type->preresume(ti); if (r) return r; } for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (ti->type->resume) ti->type->resume(ti); } return 0; } void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) { list_add(&cb->list, &t->target_callbacks); } EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); int dm_table_any_congested(struct dm_table *t, int bdi_bits) { struct dm_dev_internal *dd; struct list_head *devices = dm_table_get_devices(t); struct dm_target_callbacks *cb; int r = 0; list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); char b[BDEVNAME_SIZE]; if (likely(q)) r |= bdi_congested(&q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: 
any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	/* Let targets (e.g. dm-raid) add their own congestion state. */
	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}

/* Return 1 if any target in the table reports itself busy. */
int dm_table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}

/* Owning mapped_device of this table. */
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

/* iterate_devices callback: does this underlying queue support discard? */
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

/*
 * True when at least one target can accept discards, either because the
 * target claims discards_supported itself or because one of its
 * underlying devices supports them.
 */
bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_requests)
			continue;

		if (ti->discards_supported)
			return 1;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return 1;
	}

	return 0;
}
gpl-2.0
zarboz/EvilZ-Kernel122
drivers/media/dvb/frontends/zl10353.c
3267
16739
/* * Driver for Zarlink DVB-T ZL10353 demodulator * * Copyright (C) 2006, 2007 Christopher Pascoe <c.pascoe@itee.uq.edu.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "zl10353_priv.h" #include "zl10353.h" struct zl10353_state { struct i2c_adapter *i2c; struct dvb_frontend frontend; struct zl10353_config config; enum fe_bandwidth bandwidth; u32 ucblocks; u32 frequency; }; static int debug; #define dprintk(args...) 
\
	do { \
	if (debug) printk(KERN_DEBUG "zl10353: " args); \
	} while (0)

static int debug_regs;

/*
 * Write one demodulator register over I2C.
 * Returns 0 on success or the i2c_transfer() error code.
 */
static int zl10353_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
{
	struct zl10353_state *state = fe->demodulator_priv;
	u8 buf[2] = { reg, val };
	struct i2c_msg msg = { .addr = state->config.demod_address, .flags = 0,
			       .buf = buf, .len = 2 };
	int err = i2c_transfer(state->i2c, &msg, 1);
	if (err != 1) {
		printk("zl10353: write to reg %x failed (err = %d)!\n",
		       reg, err);
		return err;
	}

	return 0;
}

/*
 * Write a run of consecutive registers: ibuf[0] is the first register
 * address, ibuf[1..ilen-1] are the values, so ilen-1 registers get written.
 * Stops and returns the error of the first failing write.
 */
static int zl10353_write(struct dvb_frontend *fe, const u8 ibuf[], int ilen)
{
	int err, i;
	for (i = 0; i < ilen - 1; i++)
		if ((err = zl10353_single_write(fe, ibuf[0] + i, ibuf[i + 1])))
			return err;

	return 0;
}

/*
 * Read one register via a write-address/read-byte I2C transaction.
 * Returns the (non-negative) register value, or the i2c_transfer()
 * error/short-transfer count on failure — callers test for < 0.
 */
static int zl10353_read_register(struct zl10353_state *state, u8 reg)
{
	int ret;
	u8 b0[1] = { reg };
	u8 b1[1] = { 0 };
	struct i2c_msg msg[2] = { { .addr = state->config.demod_address,
				    .flags = 0,
				    .buf = b0, .len = 1 },
				  { .addr = state->config.demod_address,
				    .flags = I2C_M_RD,
				    .buf = b1, .len = 1 } };

	ret = i2c_transfer(state->i2c, msg, 2);

	if (ret != 2) {
		printk("%s: readreg error (reg=%d, ret==%i)\n",
		       __func__, reg, ret);
		return ret;
	}

	return b1[0];
}

/* Debug helper: hex-dump the full 0x00-0xff register space to the log. */
static void zl10353_dump_regs(struct dvb_frontend *fe)
{
	struct zl10353_state *state = fe->demodulator_priv;
	int ret;
	u8 reg;

	/* Dump all registers. 
*/ for (reg = 0; ; reg++) { if (reg % 16 == 0) { if (reg) printk(KERN_CONT "\n"); printk(KERN_DEBUG "%02x:", reg); } ret = zl10353_read_register(state, reg); if (ret >= 0) printk(KERN_CONT " %02x", (u8)ret); else printk(KERN_CONT " --"); if (reg == 0xff) break; } printk(KERN_CONT "\n"); } static void zl10353_calc_nominal_rate(struct dvb_frontend *fe, enum fe_bandwidth bandwidth, u16 *nominal_rate) { struct zl10353_state *state = fe->demodulator_priv; u32 adc_clock = 450560; /* 45.056 MHz */ u64 value; u8 bw; if (state->config.adc_clock) adc_clock = state->config.adc_clock; switch (bandwidth) { case BANDWIDTH_6_MHZ: bw = 6; break; case BANDWIDTH_7_MHZ: bw = 7; break; case BANDWIDTH_8_MHZ: default: bw = 8; break; } value = (u64)10 * (1 << 23) / 7 * 125; value = (bw * value) + adc_clock / 2; do_div(value, adc_clock); *nominal_rate = value; dprintk("%s: bw %d, adc_clock %d => 0x%x\n", __func__, bw, adc_clock, *nominal_rate); } static void zl10353_calc_input_freq(struct dvb_frontend *fe, u16 *input_freq) { struct zl10353_state *state = fe->demodulator_priv; u32 adc_clock = 450560; /* 45.056 MHz */ int if2 = 361667; /* 36.1667 MHz */ int ife; u64 value; if (state->config.adc_clock) adc_clock = state->config.adc_clock; if (state->config.if2) if2 = state->config.if2; if (adc_clock >= if2 * 2) ife = if2; else { ife = adc_clock - (if2 % adc_clock); if (ife > adc_clock / 2) ife = adc_clock - ife; } value = (u64)65536 * ife + adc_clock / 2; do_div(value, adc_clock); *input_freq = -value; dprintk("%s: if2 %d, ife %d, adc_clock %d => %d / 0x%x\n", __func__, if2, ife, adc_clock, -(int)value, *input_freq); } static int zl10353_sleep(struct dvb_frontend *fe) { static u8 zl10353_softdown[] = { 0x50, 0x0C, 0x44 }; zl10353_write(fe, zl10353_softdown, sizeof(zl10353_softdown)); return 0; } static int zl10353_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) { struct zl10353_state *state = fe->demodulator_priv; u16 nominal_rate, input_freq; u8 pllbuf[6] = { 
0x67 }, acq_ctl = 0; u16 tps = 0; struct dvb_ofdm_parameters *op = &param->u.ofdm; state->frequency = param->frequency; zl10353_single_write(fe, RESET, 0x80); udelay(200); zl10353_single_write(fe, 0xEA, 0x01); udelay(200); zl10353_single_write(fe, 0xEA, 0x00); zl10353_single_write(fe, AGC_TARGET, 0x28); if (op->transmission_mode != TRANSMISSION_MODE_AUTO) acq_ctl |= (1 << 0); if (op->guard_interval != GUARD_INTERVAL_AUTO) acq_ctl |= (1 << 1); zl10353_single_write(fe, ACQ_CTL, acq_ctl); switch (op->bandwidth) { case BANDWIDTH_6_MHZ: /* These are extrapolated from the 7 and 8MHz values */ zl10353_single_write(fe, MCLK_RATIO, 0x97); zl10353_single_write(fe, 0x64, 0x34); zl10353_single_write(fe, 0xcc, 0xdd); break; case BANDWIDTH_7_MHZ: zl10353_single_write(fe, MCLK_RATIO, 0x86); zl10353_single_write(fe, 0x64, 0x35); zl10353_single_write(fe, 0xcc, 0x73); break; case BANDWIDTH_8_MHZ: default: zl10353_single_write(fe, MCLK_RATIO, 0x75); zl10353_single_write(fe, 0x64, 0x36); zl10353_single_write(fe, 0xcc, 0x73); } zl10353_calc_nominal_rate(fe, op->bandwidth, &nominal_rate); zl10353_single_write(fe, TRL_NOMINAL_RATE_1, msb(nominal_rate)); zl10353_single_write(fe, TRL_NOMINAL_RATE_0, lsb(nominal_rate)); state->bandwidth = op->bandwidth; zl10353_calc_input_freq(fe, &input_freq); zl10353_single_write(fe, INPUT_FREQ_1, msb(input_freq)); zl10353_single_write(fe, INPUT_FREQ_0, lsb(input_freq)); /* Hint at TPS settings */ switch (op->code_rate_HP) { case FEC_2_3: tps |= (1 << 7); break; case FEC_3_4: tps |= (2 << 7); break; case FEC_5_6: tps |= (3 << 7); break; case FEC_7_8: tps |= (4 << 7); break; case FEC_1_2: case FEC_AUTO: break; default: return -EINVAL; } switch (op->code_rate_LP) { case FEC_2_3: tps |= (1 << 4); break; case FEC_3_4: tps |= (2 << 4); break; case FEC_5_6: tps |= (3 << 4); break; case FEC_7_8: tps |= (4 << 4); break; case FEC_1_2: case FEC_AUTO: break; case FEC_NONE: if (op->hierarchy_information == HIERARCHY_AUTO || op->hierarchy_information == 
HIERARCHY_NONE) break; default: return -EINVAL; } switch (op->constellation) { case QPSK: break; case QAM_AUTO: case QAM_16: tps |= (1 << 13); break; case QAM_64: tps |= (2 << 13); break; default: return -EINVAL; } switch (op->transmission_mode) { case TRANSMISSION_MODE_2K: case TRANSMISSION_MODE_AUTO: break; case TRANSMISSION_MODE_8K: tps |= (1 << 0); break; default: return -EINVAL; } switch (op->guard_interval) { case GUARD_INTERVAL_1_32: case GUARD_INTERVAL_AUTO: break; case GUARD_INTERVAL_1_16: tps |= (1 << 2); break; case GUARD_INTERVAL_1_8: tps |= (2 << 2); break; case GUARD_INTERVAL_1_4: tps |= (3 << 2); break; default: return -EINVAL; } switch (op->hierarchy_information) { case HIERARCHY_AUTO: case HIERARCHY_NONE: break; case HIERARCHY_1: tps |= (1 << 10); break; case HIERARCHY_2: tps |= (2 << 10); break; case HIERARCHY_4: tps |= (3 << 10); break; default: return -EINVAL; } zl10353_single_write(fe, TPS_GIVEN_1, msb(tps)); zl10353_single_write(fe, TPS_GIVEN_0, lsb(tps)); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* * If there is no tuner attached to the secondary I2C bus, we call * set_params to program a potential tuner attached somewhere else. * Otherwise, we update the PLL registers via calc_regs. */ if (state->config.no_tuner) { if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, param); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } } else if (fe->ops.tuner_ops.calc_regs) { fe->ops.tuner_ops.calc_regs(fe, param, pllbuf + 1, 5); pllbuf[1] <<= 1; zl10353_write(fe, pllbuf, sizeof(pllbuf)); } zl10353_single_write(fe, 0x5F, 0x13); /* If no attached tuner or invalid PLL registers, just start the FSM. 
*/ if (state->config.no_tuner || fe->ops.tuner_ops.calc_regs == NULL) zl10353_single_write(fe, FSM_GO, 0x01); else zl10353_single_write(fe, TUNER_GO, 0x01); return 0; } static int zl10353_get_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) { struct zl10353_state *state = fe->demodulator_priv; struct dvb_ofdm_parameters *op = &param->u.ofdm; int s6, s9; u16 tps; static const u8 tps_fec_to_api[8] = { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, FEC_AUTO, FEC_AUTO, FEC_AUTO }; s6 = zl10353_read_register(state, STATUS_6); s9 = zl10353_read_register(state, STATUS_9); if (s6 < 0 || s9 < 0) return -EREMOTEIO; if ((s6 & (1 << 5)) == 0 || (s9 & (1 << 4)) == 0) return -EINVAL; /* no FE or TPS lock */ tps = zl10353_read_register(state, TPS_RECEIVED_1) << 8 | zl10353_read_register(state, TPS_RECEIVED_0); op->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7]; op->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7]; switch ((tps >> 13) & 3) { case 0: op->constellation = QPSK; break; case 1: op->constellation = QAM_16; break; case 2: op->constellation = QAM_64; break; default: op->constellation = QAM_AUTO; break; } op->transmission_mode = (tps & 0x01) ? 
TRANSMISSION_MODE_8K : TRANSMISSION_MODE_2K; switch ((tps >> 2) & 3) { case 0: op->guard_interval = GUARD_INTERVAL_1_32; break; case 1: op->guard_interval = GUARD_INTERVAL_1_16; break; case 2: op->guard_interval = GUARD_INTERVAL_1_8; break; case 3: op->guard_interval = GUARD_INTERVAL_1_4; break; default: op->guard_interval = GUARD_INTERVAL_AUTO; break; } switch ((tps >> 10) & 7) { case 0: op->hierarchy_information = HIERARCHY_NONE; break; case 1: op->hierarchy_information = HIERARCHY_1; break; case 2: op->hierarchy_information = HIERARCHY_2; break; case 3: op->hierarchy_information = HIERARCHY_4; break; default: op->hierarchy_information = HIERARCHY_AUTO; break; } param->frequency = state->frequency; op->bandwidth = state->bandwidth; param->inversion = INVERSION_AUTO; return 0; } static int zl10353_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct zl10353_state *state = fe->demodulator_priv; int s6, s7, s8; if ((s6 = zl10353_read_register(state, STATUS_6)) < 0) return -EREMOTEIO; if ((s7 = zl10353_read_register(state, STATUS_7)) < 0) return -EREMOTEIO; if ((s8 = zl10353_read_register(state, STATUS_8)) < 0) return -EREMOTEIO; *status = 0; if (s6 & (1 << 2)) *status |= FE_HAS_CARRIER; if (s6 & (1 << 1)) *status |= FE_HAS_VITERBI; if (s6 & (1 << 5)) *status |= FE_HAS_LOCK; if (s7 & (1 << 4)) *status |= FE_HAS_SYNC; if (s8 & (1 << 6)) *status |= FE_HAS_SIGNAL; if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) *status &= ~FE_HAS_LOCK; return 0; } static int zl10353_read_ber(struct dvb_frontend *fe, u32 *ber) { struct zl10353_state *state = fe->demodulator_priv; *ber = zl10353_read_register(state, RS_ERR_CNT_2) << 16 | zl10353_read_register(state, RS_ERR_CNT_1) << 8 | zl10353_read_register(state, RS_ERR_CNT_0); return 0; } static int zl10353_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct zl10353_state *state = fe->demodulator_priv; u16 signal = 
zl10353_read_register(state, AGC_GAIN_1) << 10 | zl10353_read_register(state, AGC_GAIN_0) << 2 | 3; *strength = ~signal; return 0; } static int zl10353_read_snr(struct dvb_frontend *fe, u16 *snr) { struct zl10353_state *state = fe->demodulator_priv; u8 _snr; if (debug_regs) zl10353_dump_regs(fe); _snr = zl10353_read_register(state, SNR); *snr = (_snr << 8) | _snr; return 0; } static int zl10353_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct zl10353_state *state = fe->demodulator_priv; u32 ubl = 0; ubl = zl10353_read_register(state, RS_UBC_1) << 8 | zl10353_read_register(state, RS_UBC_0); state->ucblocks += ubl; *ucblocks = state->ucblocks; return 0; } static int zl10353_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fe_tune_settings) { fe_tune_settings->min_delay_ms = 1000; fe_tune_settings->step_size = 0; fe_tune_settings->max_drift = 0; return 0; } static int zl10353_init(struct dvb_frontend *fe) { struct zl10353_state *state = fe->demodulator_priv; u8 zl10353_reset_attach[6] = { 0x50, 0x03, 0x64, 0x46, 0x15, 0x0F }; int rc = 0; if (debug_regs) zl10353_dump_regs(fe); if (state->config.parallel_ts) zl10353_reset_attach[2] &= ~0x20; if (state->config.clock_ctl_1) zl10353_reset_attach[3] = state->config.clock_ctl_1; if (state->config.pll_0) zl10353_reset_attach[4] = state->config.pll_0; /* Do a "hard" reset if not already done */ if (zl10353_read_register(state, 0x50) != zl10353_reset_attach[1] || zl10353_read_register(state, 0x51) != zl10353_reset_attach[2]) { rc = zl10353_write(fe, zl10353_reset_attach, sizeof(zl10353_reset_attach)); if (debug_regs) zl10353_dump_regs(fe); } return 0; } static int zl10353_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct zl10353_state *state = fe->demodulator_priv; u8 val = 0x0a; if (state->config.disable_i2c_gate_ctrl) { /* No tuner attached to the internal I2C bus */ /* If set enable I2C bridge, the main I2C bus stopped hardly */ return 0; } if (enable) val |= 0x10; return 
zl10353_single_write(fe, 0x62, val); } static void zl10353_release(struct dvb_frontend *fe) { struct zl10353_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops zl10353_ops; struct dvb_frontend *zl10353_attach(const struct zl10353_config *config, struct i2c_adapter *i2c) { struct zl10353_state *state = NULL; int id; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct zl10353_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->i2c = i2c; memcpy(&state->config, config, sizeof(struct zl10353_config)); /* check if the demod is there */ id = zl10353_read_register(state, CHIP_ID); if ((id != ID_ZL10353) && (id != ID_CE6230) && (id != ID_CE6231)) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &zl10353_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops zl10353_ops = { .info = { .name = "Zarlink ZL10353 DVB-T", .type = FE_OFDM, .frequency_min = 174000000, .frequency_max = 862000000, .frequency_stepsize = 166667, .frequency_tolerance = 0, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .release = zl10353_release, .init = zl10353_init, .sleep = zl10353_sleep, .i2c_gate_ctrl = zl10353_i2c_gate_ctrl, .write = zl10353_write, .set_frontend = zl10353_set_parameters, .get_frontend = zl10353_get_parameters, .get_tune_settings = zl10353_get_tune_settings, .read_status = zl10353_read_status, .read_ber = zl10353_read_ber, .read_signal_strength = zl10353_read_signal_strength, .read_snr = zl10353_read_snr, .read_ucblocks = zl10353_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off 
frontend debugging (default:off)."); module_param(debug_regs, int, 0644); MODULE_PARM_DESC(debug_regs, "Turn on/off frontend register dumps (default:off)."); MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver"); MODULE_AUTHOR("Chris Pascoe"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(zl10353_attach);
gpl-2.0
ericwjr/wandboard_kernel
arch/powerpc/sysdev/msi_bitmap.c
4035
6651
/* * Copyright 2006-2008, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the * License. * */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <asm/msi_bitmap.h> int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num) { unsigned long flags; int offset, order = get_count_order(num); spin_lock_irqsave(&bmp->lock, flags); /* * This is fast, but stricter than we need. We might want to add * a fallback routine which does a linear search with no alignment. */ offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order); spin_unlock_irqrestore(&bmp->lock, flags); pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n", num, order, offset); return offset; } void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, unsigned int num) { unsigned long flags; int order = get_count_order(num); pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n", num, order, offset); spin_lock_irqsave(&bmp->lock, flags); bitmap_release_region(bmp->bitmap, offset, order); spin_unlock_irqrestore(&bmp->lock, flags); } void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq) { unsigned long flags; pr_debug("msi_bitmap: reserving hwirq 0x%x\n", hwirq); spin_lock_irqsave(&bmp->lock, flags); bitmap_allocate_region(bmp->bitmap, hwirq, 0); spin_unlock_irqrestore(&bmp->lock, flags); } /** * msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree. * @bmp: pointer to the MSI bitmap. * * Looks in the device tree to see if there is a property specifying which * irqs can be used for MSI. If found those irqs reserved in the device tree * are reserved in the bitmap. * * Returns 0 for success, < 0 if there was an error, and > 0 if no property * was found in the device tree. 
**/ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp) { int i, j, len; const u32 *p; if (!bmp->of_node) return 1; p = of_get_property(bmp->of_node, "msi-available-ranges", &len); if (!p) { pr_debug("msi_bitmap: no msi-available-ranges property " \ "found on %s\n", bmp->of_node->full_name); return 1; } if (len % (2 * sizeof(u32)) != 0) { printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges" " property on %s\n", bmp->of_node->full_name); return -EINVAL; } bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count)); spin_lock(&bmp->lock); /* Format is: (<u32 start> <u32 count>)+ */ len /= 2 * sizeof(u32); for (i = 0; i < len; i++, p += 2) { for (j = 0; j < *(p + 1); j++) bitmap_release_region(bmp->bitmap, *p + j, 0); } spin_unlock(&bmp->lock); return 0; } int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, struct device_node *of_node) { int size; if (!irq_count) return -EINVAL; size = BITS_TO_LONGS(irq_count) * sizeof(long); pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size); bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL); if (!bmp->bitmap) { pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n"); return -ENOMEM; } /* We zalloc'ed the bitmap, so all irqs are free by default */ spin_lock_init(&bmp->lock); bmp->of_node = of_node_get(of_node); bmp->irq_count = irq_count; return 0; } void msi_bitmap_free(struct msi_bitmap *bmp) { /* we can't free the bitmap we don't know if it's bootmem etc. 
*/ of_node_put(bmp->of_node); bmp->bitmap = NULL; } #ifdef CONFIG_MSI_BITMAP_SELFTEST #define check(x) \ if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); void __init test_basics(void) { struct msi_bitmap bmp; int i, size = 512; /* Can't allocate a bitmap of 0 irqs */ check(msi_bitmap_alloc(&bmp, 0, NULL) != 0); /* of_node may be NULL */ check(0 == msi_bitmap_alloc(&bmp, size, NULL)); /* Should all be free by default */ check(0 == bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* With no node, there's no msi-available-ranges, so expect > 0 */ check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); /* Should all still be free */ check(0 == bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* Check we can fill it up and then no more */ for (i = 0; i < size; i++) check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); /* Should all be allocated */ check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0); /* And if we free one we can then allocate another */ msi_bitmap_free_hwirqs(&bmp, size / 2, 1); check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); msi_bitmap_free(&bmp); /* Clients may check bitmap == NULL for "not-allocated" */ check(bmp.bitmap == NULL); kfree(bmp.bitmap); } void __init test_of_node(void) { u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 }; const char *expected_str = "0-9,20-24,28-39,41-99,220-255"; char *prop_name = "msi-available-ranges"; char *node_name = "/fakenode"; struct device_node of_node; struct property prop; struct msi_bitmap bmp; int size = 256; DECLARE_BITMAP(expected, size); /* There should really be a struct device_node allocator */ memset(&of_node, 0, sizeof(of_node)); kref_init(&of_node.kref); of_node.full_name = node_name; check(0 == msi_bitmap_alloc(&bmp, size, &of_node)); /* No msi-available-ranges, so expect > 0 */ 
check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); /* Should all still be free */ check(0 == bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* Now create a fake msi-available-ranges property */ /* There should really .. oh whatever */ memset(&prop, 0, sizeof(prop)); prop.name = prop_name; prop.value = &prop_data; prop.length = sizeof(prop_data); of_node.properties = &prop; /* msi-available-ranges, so expect == 0 */ check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0); /* Check we got the expected result */ check(0 == bitmap_parselist(expected_str, expected, size)); check(bitmap_equal(expected, bmp.bitmap, size)); msi_bitmap_free(&bmp); kfree(bmp.bitmap); } int __init msi_bitmap_selftest(void) { printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n"); test_basics(); test_of_node(); return 0; } late_initcall(msi_bitmap_selftest); #endif /* CONFIG_MSI_BITMAP_SELFTEST */
gpl-2.0
bbedward/ZenKernel_Flounder
arch/c6x/platforms/plldata.c
4547
11758
/* * Port on Texas Instruments TMS320C6x architecture * * Copyright (C) 2011 Texas Instruments Incorporated * Author: Mark Salter <msalter@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/clkdev.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/clock.h> #include <asm/setup.h> #include <asm/irq.h> /* * Common SoC clock support. */ /* Default input for PLL1 */ struct clk clkin1 = { .name = "clkin1", .node = LIST_HEAD_INIT(clkin1.node), .children = LIST_HEAD_INIT(clkin1.children), .childnode = LIST_HEAD_INIT(clkin1.childnode), }; struct pll_data c6x_soc_pll1 = { .num = 1, .sysclks = { { .name = "pll1", .parent = &clkin1, .pll_data = &c6x_soc_pll1, .flags = CLK_PLL, }, { .name = "pll1_sysclk1", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk2", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk3", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk4", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk5", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk6", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk7", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk8", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk9", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk10", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk11", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk12", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = 
"pll1_sysclk13", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk14", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk15", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, { .name = "pll1_sysclk16", .parent = &c6x_soc_pll1.sysclks[0], .flags = CLK_PLL, }, }, }; /* CPU core clock */ struct clk c6x_core_clk = { .name = "core", }; /* miscellaneous IO clocks */ struct clk c6x_i2c_clk = { .name = "i2c", }; struct clk c6x_watchdog_clk = { .name = "watchdog", }; struct clk c6x_mcbsp1_clk = { .name = "mcbsp1", }; struct clk c6x_mcbsp2_clk = { .name = "mcbsp2", }; struct clk c6x_mdio_clk = { .name = "mdio", }; #ifdef CONFIG_SOC_TMS320C6455 static struct clk_lookup c6455_clks[] = { CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), CLK(NULL, "core", &c6x_core_clk), CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), CLK("watchdog", NULL, &c6x_watchdog_clk), CLK("2c81800.mdio", NULL, &c6x_mdio_clk), CLK("", NULL, NULL) }; static void __init c6455_setup_clocks(struct device_node *node) { struct pll_data *pll = &c6x_soc_pll1; struct clk *sysclks = pll->sysclks; pll->flags = PLL_HAS_PRE | PLL_HAS_MUL; sysclks[2].flags |= FIXED_DIV_PLL; sysclks[2].div = 3; sysclks[3].flags |= FIXED_DIV_PLL; sysclks[3].div = 6; sysclks[4].div = PLLDIV4; sysclks[5].div = PLLDIV5; c6x_core_clk.parent = &sysclks[0]; c6x_i2c_clk.parent = &sysclks[3]; c6x_watchdog_clk.parent = &sysclks[3]; c6x_mdio_clk.parent = &sysclks[3]; c6x_clks_init(c6455_clks); } #endif /* CONFIG_SOC_TMS320C6455 */ #ifdef CONFIG_SOC_TMS320C6457 static struct clk_lookup c6457_clks[] = { CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]), CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), CLK(NULL, "pll1_sysclk3", 
&c6x_soc_pll1.sysclks[3]), CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), CLK(NULL, "core", &c6x_core_clk), CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), CLK("watchdog", NULL, &c6x_watchdog_clk), CLK("2c81800.mdio", NULL, &c6x_mdio_clk), CLK("", NULL, NULL) }; static void __init c6457_setup_clocks(struct device_node *node) { struct pll_data *pll = &c6x_soc_pll1; struct clk *sysclks = pll->sysclks; pll->flags = PLL_HAS_MUL | PLL_HAS_POST; sysclks[1].flags |= FIXED_DIV_PLL; sysclks[1].div = 1; sysclks[2].flags |= FIXED_DIV_PLL; sysclks[2].div = 3; sysclks[3].flags |= FIXED_DIV_PLL; sysclks[3].div = 6; sysclks[4].div = PLLDIV4; sysclks[5].div = PLLDIV5; c6x_core_clk.parent = &sysclks[1]; c6x_i2c_clk.parent = &sysclks[3]; c6x_watchdog_clk.parent = &sysclks[5]; c6x_mdio_clk.parent = &sysclks[5]; c6x_clks_init(c6457_clks); } #endif /* CONFIG_SOC_TMS320C6455 */ #ifdef CONFIG_SOC_TMS320C6472 static struct clk_lookup c6472_clks[] = { CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]), CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]), CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]), CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]), CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]), CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]), CLK(NULL, "core", &c6x_core_clk), CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), CLK("watchdog", NULL, &c6x_watchdog_clk), CLK("2c81800.mdio", NULL, &c6x_mdio_clk), CLK("", NULL, NULL) }; /* assumptions used for delay loop calculations */ #define MIN_CLKIN1_KHz 15625 #define MAX_CORE_KHz 700000 #define MIN_PLLOUT_KHz MIN_CLKIN1_KHz static void __init c6472_setup_clocks(struct device_node *node) { struct pll_data *pll = 
&c6x_soc_pll1; struct clk *sysclks = pll->sysclks; int i; pll->flags = PLL_HAS_MUL; for (i = 1; i <= 6; i++) { sysclks[i].flags |= FIXED_DIV_PLL; sysclks[i].div = 1; } sysclks[7].flags |= FIXED_DIV_PLL; sysclks[7].div = 3; sysclks[8].flags |= FIXED_DIV_PLL; sysclks[8].div = 6; sysclks[9].flags |= FIXED_DIV_PLL; sysclks[9].div = 2; sysclks[10].div = PLLDIV10; c6x_core_clk.parent = &sysclks[get_coreid() + 1]; c6x_i2c_clk.parent = &sysclks[8]; c6x_watchdog_clk.parent = &sysclks[8]; c6x_mdio_clk.parent = &sysclks[5]; c6x_clks_init(c6472_clks); } #endif /* CONFIG_SOC_TMS320C6472 */ #ifdef CONFIG_SOC_TMS320C6474 static struct clk_lookup c6474_clks[] = { CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]), CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]), CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]), CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]), CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]), CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]), CLK(NULL, "core", &c6x_core_clk), CLK("i2c_davinci.1", NULL, &c6x_i2c_clk), CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk), CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk), CLK("watchdog", NULL, &c6x_watchdog_clk), CLK("2c81800.mdio", NULL, &c6x_mdio_clk), CLK("", NULL, NULL) }; static void __init c6474_setup_clocks(struct device_node *node) { struct pll_data *pll = &c6x_soc_pll1; struct clk *sysclks = pll->sysclks; pll->flags = PLL_HAS_MUL; sysclks[7].flags |= FIXED_DIV_PLL; sysclks[7].div = 1; sysclks[9].flags |= FIXED_DIV_PLL; sysclks[9].div = 3; sysclks[10].flags |= FIXED_DIV_PLL; sysclks[10].div = 6; sysclks[11].div = PLLDIV11; sysclks[12].flags |= FIXED_DIV_PLL; sysclks[12].div = 2; sysclks[13].div = PLLDIV13; c6x_core_clk.parent = &sysclks[7]; c6x_i2c_clk.parent = &sysclks[10]; c6x_watchdog_clk.parent = &sysclks[10]; c6x_mcbsp1_clk.parent = &sysclks[10]; c6x_mcbsp2_clk.parent = &sysclks[10]; c6x_clks_init(c6474_clks); } #endif /* CONFIG_SOC_TMS320C6474 */ #ifdef 
CONFIG_SOC_TMS320C6678 static struct clk_lookup c6678_clks[] = { CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]), CLK(NULL, "pll1_refclk", &c6x_soc_pll1.sysclks[1]), CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]), CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]), CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]), CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]), CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]), CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]), CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]), CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]), CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]), CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]), CLK(NULL, "core", &c6x_core_clk), CLK("", NULL, NULL) }; static void __init c6678_setup_clocks(struct device_node *node) { struct pll_data *pll = &c6x_soc_pll1; struct clk *sysclks = pll->sysclks; pll->flags = PLL_HAS_MUL; sysclks[1].flags |= FIXED_DIV_PLL; sysclks[1].div = 1; sysclks[2].div = PLLDIV2; sysclks[3].flags |= FIXED_DIV_PLL; sysclks[3].div = 2; sysclks[4].flags |= FIXED_DIV_PLL; sysclks[4].div = 3; sysclks[5].div = PLLDIV5; sysclks[6].flags |= FIXED_DIV_PLL; sysclks[6].div = 64; sysclks[7].flags |= FIXED_DIV_PLL; sysclks[7].div = 6; sysclks[8].div = PLLDIV8; sysclks[9].flags |= FIXED_DIV_PLL; sysclks[9].div = 12; sysclks[10].flags |= FIXED_DIV_PLL; sysclks[10].div = 3; sysclks[11].flags |= FIXED_DIV_PLL; sysclks[11].div = 6; c6x_core_clk.parent = &sysclks[0]; c6x_i2c_clk.parent = &sysclks[7]; c6x_clks_init(c6678_clks); } #endif /* CONFIG_SOC_TMS320C6678 */ static struct of_device_id c6x_clkc_match[] __initdata = { #ifdef CONFIG_SOC_TMS320C6455 { .compatible = "ti,c6455-pll", .data = c6455_setup_clocks }, #endif #ifdef CONFIG_SOC_TMS320C6457 { .compatible = "ti,c6457-pll", .data = c6457_setup_clocks }, #endif #ifdef CONFIG_SOC_TMS320C6472 { .compatible = "ti,c6472-pll", .data = c6472_setup_clocks }, #endif #ifdef CONFIG_SOC_TMS320C6474 { .compatible = "ti,c6474-pll", 
.data = c6474_setup_clocks }, #endif #ifdef CONFIG_SOC_TMS320C6678 { .compatible = "ti,c6678-pll", .data = c6678_setup_clocks }, #endif { .compatible = "ti,c64x+pll" }, {} }; void __init c64x_setup_clocks(void) { void (*__setup_clocks)(struct device_node *np); struct pll_data *pll = &c6x_soc_pll1; struct device_node *node; const struct of_device_id *id; int err; u32 val; node = of_find_matching_node(NULL, c6x_clkc_match); if (!node) return; pll->base = of_iomap(node, 0); if (!pll->base) goto out; err = of_property_read_u32(node, "clock-frequency", &val); if (err || val == 0) { pr_err("%s: no clock-frequency found! Using %dMHz\n", node->full_name, (int)val / 1000000); val = 25000000; } clkin1.rate = val; err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val); if (err) val = 5000; pll->bypass_delay = val; err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val); if (err) val = 30000; pll->reset_delay = val; err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val); if (err) val = 30000; pll->lock_delay = val; /* id->data is a pointer to SoC-specific setup */ id = of_match_node(c6x_clkc_match, node); if (id && id->data) { __setup_clocks = id->data; __setup_clocks(node); } out: of_node_put(node); }
gpl-2.0
ChaOSChriS/ChaOS-mako
drivers/regulator/max8925-regulator.c
4803
8875
/* * Regulators driver for Maxim max8925 * * Copyright (C) 2009 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/mfd/max8925.h> #define SD1_DVM_VMIN 850000 #define SD1_DVM_VMAX 1000000 #define SD1_DVM_STEP 50000 #define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */ #define SD1_DVM_EN 6 /* SDV1 bit 6 */ /* bit definitions in LDO control registers */ #define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */ #define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */ #define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */ #define LDO_I2C_EN 0x1 /* Enable by i2c */ #define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */ #define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */ struct max8925_regulator_info { struct regulator_desc desc; struct regulator_dev *regulator; struct i2c_client *i2c; struct max8925_chip *chip; int min_uV; int max_uV; int step_uV; int vol_reg; int vol_shift; int vol_nbits; int enable_reg; }; static inline int check_range(struct max8925_regulator_info *info, int min_uV, int max_uV) { if (min_uV < info->min_uV || min_uV > info->max_uV) return -EINVAL; return 0; } static int max8925_list_voltage(struct regulator_dev *rdev, unsigned index) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); return info->min_uV + index * info->step_uV; } static int max8925_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned int *selector) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); unsigned char data, mask; if (check_range(info, min_uV, max_uV)) { dev_err(info->chip->dev, "invalid voltage range (%d, 
%d) uV\n", min_uV, max_uV); return -EINVAL; } data = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV); *selector = data; data <<= info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; return max8925_set_bits(info->i2c, info->vol_reg, mask, data); } static int max8925_get_voltage(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); unsigned char data, mask; int ret; ret = max8925_reg_read(info->i2c, info->vol_reg); if (ret < 0) return ret; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; data = (ret & mask) >> info->vol_shift; return max8925_list_voltage(rdev, data); } static int max8925_enable(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); return max8925_set_bits(info->i2c, info->enable_reg, LDO_SEQ_MASK << LDO_SEQ_SHIFT | LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT, LDO_SEQ_I2C << LDO_SEQ_SHIFT | LDO_I2C_EN << LDO_I2C_EN_SHIFT); } static int max8925_disable(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); return max8925_set_bits(info->i2c, info->enable_reg, LDO_SEQ_MASK << LDO_SEQ_SHIFT | LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT, LDO_SEQ_I2C << LDO_SEQ_SHIFT); } static int max8925_is_enabled(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); int ldo_seq, ret; ret = max8925_reg_read(info->i2c, info->enable_reg); if (ret < 0) return ret; ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK; if (ldo_seq != LDO_SEQ_I2C) return 1; else return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT); } static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); unsigned char data, mask; if (uV < SD1_DVM_VMIN || uV > SD1_DVM_VMAX) return -EINVAL; data = DIV_ROUND_UP(uV - SD1_DVM_VMIN, SD1_DVM_STEP); data <<= SD1_DVM_SHIFT; mask = 3 << SD1_DVM_SHIFT; return max8925_set_bits(info->i2c, info->enable_reg, mask, data); } static int 
max8925_set_dvm_enable(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 1 << SD1_DVM_EN); } static int max8925_set_dvm_disable(struct regulator_dev *rdev) { struct max8925_regulator_info *info = rdev_get_drvdata(rdev); return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0); } static struct regulator_ops max8925_regulator_sdv_ops = { .set_voltage = max8925_set_voltage, .get_voltage = max8925_get_voltage, .enable = max8925_enable, .disable = max8925_disable, .is_enabled = max8925_is_enabled, .set_suspend_voltage = max8925_set_dvm_voltage, .set_suspend_enable = max8925_set_dvm_enable, .set_suspend_disable = max8925_set_dvm_disable, }; static struct regulator_ops max8925_regulator_ldo_ops = { .set_voltage = max8925_set_voltage, .get_voltage = max8925_get_voltage, .enable = max8925_enable, .disable = max8925_disable, .is_enabled = max8925_is_enabled, }; #define MAX8925_SDV(_id, min, max, step) \ { \ .desc = { \ .name = "SDV" #_id, \ .ops = &max8925_regulator_sdv_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MAX8925_ID_SD##_id, \ .owner = THIS_MODULE, \ }, \ .min_uV = min * 1000, \ .max_uV = max * 1000, \ .step_uV = step * 1000, \ .vol_reg = MAX8925_SDV##_id, \ .vol_shift = 0, \ .vol_nbits = 6, \ .enable_reg = MAX8925_SDCTL##_id, \ } #define MAX8925_LDO(_id, min, max, step) \ { \ .desc = { \ .name = "LDO" #_id, \ .ops = &max8925_regulator_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MAX8925_ID_LDO##_id, \ .owner = THIS_MODULE, \ }, \ .min_uV = min * 1000, \ .max_uV = max * 1000, \ .step_uV = step * 1000, \ .vol_reg = MAX8925_LDOVOUT##_id, \ .vol_shift = 0, \ .vol_nbits = 6, \ .enable_reg = MAX8925_LDOCTL##_id, \ } static struct max8925_regulator_info max8925_regulator_info[] = { MAX8925_SDV(1, 637.5, 1425, 12.5), MAX8925_SDV(2, 650, 2225, 25), MAX8925_SDV(3, 750, 3900, 50), MAX8925_LDO(1, 750, 3900, 50), MAX8925_LDO(2, 650, 2250, 25), MAX8925_LDO(3, 
650, 2250, 25), MAX8925_LDO(4, 750, 3900, 50), MAX8925_LDO(5, 750, 3900, 50), MAX8925_LDO(6, 750, 3900, 50), MAX8925_LDO(7, 750, 3900, 50), MAX8925_LDO(8, 750, 3900, 50), MAX8925_LDO(9, 750, 3900, 50), MAX8925_LDO(10, 750, 3900, 50), MAX8925_LDO(11, 750, 3900, 50), MAX8925_LDO(12, 750, 3900, 50), MAX8925_LDO(13, 750, 3900, 50), MAX8925_LDO(14, 750, 3900, 50), MAX8925_LDO(15, 750, 3900, 50), MAX8925_LDO(16, 750, 3900, 50), MAX8925_LDO(17, 650, 2250, 25), MAX8925_LDO(18, 650, 2250, 25), MAX8925_LDO(19, 750, 3900, 50), MAX8925_LDO(20, 750, 3900, 50), }; static struct max8925_regulator_info * __devinit find_regulator_info(int id) { struct max8925_regulator_info *ri; int i; for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) { ri = &max8925_regulator_info[i]; if (ri->desc.id == id) return ri; } return NULL; } static int __devinit max8925_regulator_probe(struct platform_device *pdev) { struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); struct max8925_platform_data *pdata = chip->dev->platform_data; struct max8925_regulator_info *ri; struct regulator_dev *rdev; ri = find_regulator_info(pdev->id); if (ri == NULL) { dev_err(&pdev->dev, "invalid regulator ID specified\n"); return -EINVAL; } ri->i2c = chip->i2c; ri->chip = chip; rdev = regulator_register(&ri->desc, &pdev->dev, pdata->regulator[pdev->id], ri, NULL); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register regulator %s\n", ri->desc.name); return PTR_ERR(rdev); } platform_set_drvdata(pdev, rdev); return 0; } static int __devexit max8925_regulator_remove(struct platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); regulator_unregister(rdev); return 0; } static struct platform_driver max8925_regulator_driver = { .driver = { .name = "max8925-regulator", .owner = THIS_MODULE, }, .probe = max8925_regulator_probe, .remove = __devexit_p(max8925_regulator_remove), }; static int __init max8925_regulator_init(void) { return 
platform_driver_register(&max8925_regulator_driver); } subsys_initcall(max8925_regulator_init); static void __exit max8925_regulator_exit(void) { platform_driver_unregister(&max8925_regulator_driver); } module_exit(max8925_regulator_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_DESCRIPTION("Regulator Driver for Maxim 8925 PMIC"); MODULE_ALIAS("platform:max8925-regulator");
gpl-2.0
limitedgilin/d1lkt-buffer
drivers/media/video/s5p-jpeg/jpeg-core.c
4803
40496
/* linux/drivers/media/video/s5p-jpeg/jpeg-core.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include "jpeg-core.h" #include "jpeg-hw.h" static struct s5p_jpeg_fmt formats_enc[] = { { .name = "JPEG JFIF", .fourcc = V4L2_PIX_FMT_JPEG, .colplanes = 1, .types = MEM2MEM_CAPTURE, }, { .name = "YUV 4:2:2 packed, YCbYCr", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .colplanes = 1, .types = MEM2MEM_OUTPUT, }, { .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565, .depth = 16, .colplanes = 1, .types = MEM2MEM_OUTPUT, }, }; #define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc) static struct s5p_jpeg_fmt formats_dec[] = { { .name = "YUV 4:2:0 planar, YCbCr", .fourcc = V4L2_PIX_FMT_YUV420, .depth = 12, .colplanes = 3, .h_align = 4, .v_align = 4, .types = MEM2MEM_CAPTURE, }, { .name = "YUV 4:2:2 packed, YCbYCr", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .colplanes = 1, .h_align = 4, .v_align = 3, .types = MEM2MEM_CAPTURE, }, { .name = "JPEG JFIF", .fourcc = V4L2_PIX_FMT_JPEG, .colplanes = 1, .types = MEM2MEM_OUTPUT, }, }; #define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec) static const unsigned char qtbl_luminance[4][64] = { {/* level 1 - high quality */ 8, 6, 6, 8, 12, 14, 16, 17, 6, 6, 6, 8, 10, 13, 12, 15, 6, 6, 7, 8, 13, 14, 18, 24, 8, 8, 8, 14, 13, 19, 24, 35, 12, 10, 
13, 13, 20, 26, 34, 39, 14, 13, 14, 19, 26, 34, 39, 39, 16, 12, 18, 24, 34, 39, 39, 39, 17, 15, 24, 35, 39, 39, 39, 39 }, {/* level 2 */ 12, 8, 8, 12, 17, 21, 24, 23, 8, 9, 9, 11, 15, 19, 18, 23, 8, 9, 10, 12, 19, 20, 27, 36, 12, 11, 12, 21, 20, 28, 36, 53, 17, 15, 19, 20, 30, 39, 51, 59, 21, 19, 20, 28, 39, 51, 59, 59, 24, 18, 27, 36, 51, 59, 59, 59, 23, 23, 36, 53, 59, 59, 59, 59 }, {/* level 3 */ 16, 11, 11, 16, 23, 27, 31, 30, 11, 12, 12, 15, 20, 23, 23, 30, 11, 12, 13, 16, 23, 26, 35, 47, 16, 15, 16, 23, 26, 37, 47, 64, 23, 20, 23, 26, 39, 51, 64, 64, 27, 23, 26, 37, 51, 64, 64, 64, 31, 23, 35, 47, 64, 64, 64, 64, 30, 30, 47, 64, 64, 64, 64, 64 }, {/*level 4 - low quality */ 20, 16, 25, 39, 50, 46, 62, 68, 16, 18, 23, 38, 38, 53, 65, 68, 25, 23, 31, 38, 53, 65, 68, 68, 39, 38, 38, 53, 65, 68, 68, 68, 50, 38, 53, 65, 68, 68, 68, 68, 46, 53, 65, 68, 68, 68, 68, 68, 62, 65, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68 } }; static const unsigned char qtbl_chrominance[4][64] = { {/* level 1 - high quality */ 9, 8, 9, 11, 14, 17, 19, 24, 8, 10, 9, 11, 14, 13, 17, 22, 9, 9, 13, 14, 13, 15, 23, 26, 11, 11, 14, 14, 15, 20, 26, 33, 14, 14, 13, 15, 20, 24, 33, 39, 17, 13, 15, 20, 24, 32, 39, 39, 19, 17, 23, 26, 33, 39, 39, 39, 24, 22, 26, 33, 39, 39, 39, 39 }, {/* level 2 */ 13, 11, 13, 16, 20, 20, 29, 37, 11, 14, 14, 14, 16, 20, 26, 32, 13, 14, 15, 17, 20, 23, 35, 40, 16, 14, 17, 21, 23, 30, 40, 50, 20, 16, 20, 23, 30, 37, 50, 59, 20, 20, 23, 30, 37, 48, 59, 59, 29, 26, 35, 40, 50, 59, 59, 59, 37, 32, 40, 50, 59, 59, 59, 59 }, {/* level 3 */ 17, 15, 17, 21, 20, 26, 38, 48, 15, 19, 18, 17, 20, 26, 35, 43, 17, 18, 20, 22, 26, 30, 46, 53, 21, 17, 22, 28, 30, 39, 53, 64, 20, 20, 26, 30, 39, 48, 64, 64, 26, 26, 30, 39, 48, 63, 64, 64, 38, 35, 46, 53, 64, 64, 64, 64, 48, 43, 53, 64, 64, 64, 64, 64 }, {/*level 4 - low quality */ 21, 25, 32, 38, 54, 68, 68, 68, 25, 28, 24, 38, 54, 68, 68, 68, 32, 24, 32, 43, 66, 68, 68, 68, 38, 38, 43, 53, 68, 68, 68, 68, 54, 54, 66, 
68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68 } }; static const unsigned char hdctbl0[16] = { 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }; static const unsigned char hdctblg0[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb }; static const unsigned char hactbl0[16] = { 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }; static const unsigned char hactblg0[162] = { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c) { return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler); } static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh) { return container_of(fh, struct s5p_jpeg_ctx, fh); } static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl, unsigned long tab, int len) { int i; for (i = 0; i < len; i++) writel((unsigned int)qtbl[i], regs + tab + (i * 0x04)); } static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality) { /* this driver fills quantisation table 0 with data 
for luma */ jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0), ARRAY_SIZE(qtbl_luminance[quality])); } static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality) { /* this driver fills quantisation table 1 with data for chroma */ jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1), ARRAY_SIZE(qtbl_chrominance[quality])); } static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl, unsigned long tab, int len) { int i; for (i = 0; i < len; i++) writel((unsigned int)htbl[i], regs + tab + (i * 0x04)); } static inline void jpeg_set_hdctbl(void __iomem *regs) { /* this driver fills table 0 for this component */ jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0)); } static inline void jpeg_set_hdctblg(void __iomem *regs) { /* this driver fills table 0 for this component */ jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0)); } static inline void jpeg_set_hactbl(void __iomem *regs) { /* this driver fills table 0 for this component */ jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0)); } static inline void jpeg_set_hactblg(void __iomem *regs) { /* this driver fills table 0 for this component */ jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0)); } /* * ============================================================================ * Device file operations * ============================================================================ */ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq); static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode, __u32 pixelformat); static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx); static int s5p_jpeg_open(struct file *file) { struct s5p_jpeg *jpeg = video_drvdata(file); struct video_device *vfd = video_devdata(file); struct s5p_jpeg_ctx *ctx; struct s5p_jpeg_fmt *out_fmt; int ret = 0; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return 
-ENOMEM; v4l2_fh_init(&ctx->fh, vfd); /* Use separate control handler per file handle */ ctx->fh.ctrl_handler = &ctx->ctrl_handler; file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->jpeg = jpeg; if (vfd == jpeg->vfd_encoder) { ctx->mode = S5P_JPEG_ENCODE; out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565); } else { ctx->mode = S5P_JPEG_DECODE; out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG); } ret = s5p_jpeg_controls_create(ctx); if (ret < 0) goto error; ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init); if (IS_ERR(ctx->m2m_ctx)) { ret = PTR_ERR(ctx->m2m_ctx); goto error; } ctx->out_q.fmt = out_fmt; ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV); return 0; error: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; } static int s5p_jpeg_release(struct file *file) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data); v4l2_m2m_ctx_release(ctx->m2m_ctx); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return 0; } static unsigned int s5p_jpeg_poll(struct file *file, struct poll_table_struct *wait) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data); return v4l2_m2m_poll(file, ctx->m2m_ctx, wait); } static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data); return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); } static const struct v4l2_file_operations s5p_jpeg_fops = { .owner = THIS_MODULE, .open = s5p_jpeg_open, .release = s5p_jpeg_release, .poll = s5p_jpeg_poll, .unlocked_ioctl = video_ioctl2, .mmap = s5p_jpeg_mmap, }; /* * ============================================================================ * video ioctl operations * ============================================================================ */ static int get_byte(struct s5p_jpeg_buffer *buf) { if (buf->curr >= buf->size) return -1; return ((unsigned char *)buf->data)[buf->curr++]; } 
static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word) { unsigned int temp; int byte; byte = get_byte(buf); if (byte == -1) return -1; temp = byte << 8; byte = get_byte(buf); if (byte == -1) return -1; *word = (unsigned int)byte | temp; return 0; } static void skip(struct s5p_jpeg_buffer *buf, long len) { if (len <= 0) return; while (len--) get_byte(buf); } static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, unsigned long buffer, unsigned long size) { int c, components, notfound; unsigned int height, width, word; long length; struct s5p_jpeg_buffer jpeg_buffer; jpeg_buffer.size = size; jpeg_buffer.data = buffer; jpeg_buffer.curr = 0; notfound = 1; while (notfound) { c = get_byte(&jpeg_buffer); if (c == -1) break; if (c != 0xff) continue; do c = get_byte(&jpeg_buffer); while (c == 0xff); if (c == -1) break; if (c == 0) continue; length = 0; switch (c) { /* SOF0: baseline JPEG */ case SOF0: if (get_word_be(&jpeg_buffer, &word)) break; if (get_byte(&jpeg_buffer) == -1) break; if (get_word_be(&jpeg_buffer, &height)) break; if (get_word_be(&jpeg_buffer, &width)) break; components = get_byte(&jpeg_buffer); if (components == -1) break; notfound = 0; skip(&jpeg_buffer, components * 3); break; /* skip payload-less markers */ case RST ... 
RST + 7: case SOI: case EOI: case TEM: break; /* skip uninteresting payload markers */ default: if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; skip(&jpeg_buffer, length); break; } } result->w = width; result->h = height; result->size = components; return !notfound; } static int s5p_jpeg_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) { strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder", sizeof(cap->driver)); strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder", sizeof(cap->card)); } else { strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder", sizeof(cap->driver)); strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder", sizeof(cap->card)); } cap->bus_info[0] = 0; cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT; return 0; } static int enum_fmt(struct s5p_jpeg_fmt *formats, int n, struct v4l2_fmtdesc *f, u32 type) { int i, num = 0; for (i = 0; i < n; ++i) { if (formats[i].types & type) { /* index-th format of type type found ? 
*/ if (num == f->index) break; /* Correct type but haven't reached our index yet, * just increment per-type index */ ++num; } } /* Format not found */ if (i >= n) return -EINVAL; strlcpy(f->description, formats[i].name, sizeof(f->description)); f->pixelformat = formats[i].fourcc; return 0; } static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) return enum_fmt(formats_enc, NUM_FORMATS_ENC, f, MEM2MEM_CAPTURE); return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE); } static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) return enum_fmt(formats_enc, NUM_FORMATS_ENC, f, MEM2MEM_OUTPUT); return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT); } static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx, enum v4l2_buf_type type) { if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return &ctx->out_q; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return &ctx->cap_q; return NULL; } static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct vb2_queue *vq; struct s5p_jpeg_q_data *q_data = NULL; struct v4l2_pix_format *pix = &f->fmt.pix; struct s5p_jpeg_ctx *ct = fh_to_ctx(priv); vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type); if (!vq) return -EINVAL; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed) return -EINVAL; q_data = get_q_data(ct, f->type); BUG_ON(q_data == NULL); pix->width = q_data->w; pix->height = q_data->h; pix->field = V4L2_FIELD_NONE; pix->pixelformat = q_data->fmt->fourcc; pix->bytesperline = 0; if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) { u32 bpl = q_data->w; if (q_data->fmt->colplanes == 1) bpl = (bpl * q_data->fmt->depth) >> 3; pix->bytesperline = bpl; } pix->sizeimage = q_data->size; return 0; } static struct s5p_jpeg_fmt 
*s5p_jpeg_find_format(unsigned int mode, u32 pixelformat) { unsigned int k; struct s5p_jpeg_fmt *formats; int n; if (mode == S5P_JPEG_ENCODE) { formats = formats_enc; n = NUM_FORMATS_ENC; } else { formats = formats_dec; n = NUM_FORMATS_DEC; } for (k = 0; k < n; k++) { struct s5p_jpeg_fmt *fmt = &formats[k]; if (fmt->fourcc == pixelformat) return fmt; } return NULL; } static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, unsigned int walign, u32 *h, unsigned int hmin, unsigned int hmax, unsigned int halign) { int width, height, w_step, h_step; width = *w; height = *h; w_step = 1 << walign; h_step = 1 << halign; v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0); if (*w < width && (*w + w_step) < wmax) *w += w_step; if (*h < height && (*h + h_step) < hmax) *h += h_step; } static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt, struct s5p_jpeg_ctx *ctx, int q_type) { struct v4l2_pix_format *pix = &f->fmt.pix; if (pix->field == V4L2_FIELD_ANY) pix->field = V4L2_FIELD_NONE; else if (pix->field != V4L2_FIELD_NONE) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ if (q_type == MEM2MEM_OUTPUT) jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH, S5P_JPEG_MAX_WIDTH, 0, &pix->height, S5P_JPEG_MIN_HEIGHT, S5P_JPEG_MAX_HEIGHT, 0); else jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH, S5P_JPEG_MAX_WIDTH, fmt->h_align, &pix->height, S5P_JPEG_MIN_HEIGHT, S5P_JPEG_MAX_HEIGHT, fmt->v_align); if (fmt->fourcc == V4L2_PIX_FMT_JPEG) { if (pix->sizeimage <= 0) pix->sizeimage = PAGE_SIZE; pix->bytesperline = 0; } else { u32 bpl = pix->bytesperline; if (fmt->colplanes > 1 && bpl < pix->width) bpl = pix->width; /* planar */ if (fmt->colplanes == 1 && /* packed */ (bpl << 3) * fmt->depth < pix->width) bpl = (pix->width * fmt->depth) >> 3; pix->bytesperline = bpl; pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3; } return 0; 
} static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); struct s5p_jpeg_fmt *fmt; fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat); if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->jpeg->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE); } static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); struct s5p_jpeg_fmt *fmt; fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat); if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->jpeg->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT); } static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f) { struct vb2_queue *vq; struct s5p_jpeg_q_data *q_data = NULL; struct v4l2_pix_format *pix = &f->fmt.pix; vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ct, f->type); BUG_ON(q_data == NULL); if (vb2_is_busy(vq)) { v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat); q_data->w = pix->width; q_data->h = pix->height; if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3; else q_data->size = pix->sizeimage; return 0; } static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return s5p_jpeg_s_fmt(fh_to_ctx(priv), f); } static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = s5p_jpeg_try_fmt_vid_out(file, priv, f); if (ret) return ret; return s5p_jpeg_s_fmt(fh_to_ctx(priv), f); } static int 
s5p_jpeg_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int s5p_jpeg_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int s5p_jpeg_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int s5p_jpeg_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int s5p_jpeg_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } int s5p_jpeg_g_selection(struct file *file, void *priv, struct v4l2_selection *s) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; /* For JPEG blob active == default == bounds */ switch (s->target) { case V4L2_SEL_TGT_CROP_ACTIVE: case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_COMPOSE_ACTIVE: case V4L2_SEL_TGT_COMPOSE_DEFAULT: s->r.width = ctx->out_q.w; s->r.height = ctx->out_q.h; break; case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_PADDED: s->r.width = ctx->cap_q.w; s->r.height = ctx->cap_q.h; break; default: return -EINVAL; } s->r.left = 0; s->r.top = 0; return 0; } /* * V4L2 controls */ static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl); struct s5p_jpeg *jpeg = ctx->jpeg; unsigned long 
flags; switch (ctrl->id) { case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: spin_lock_irqsave(&jpeg->slock, flags); WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY); if (ctx->subsampling > 2) ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; else ctrl->val = ctx->subsampling; spin_unlock_irqrestore(&jpeg->slock, flags); break; } return 0; } static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl); unsigned long flags; spin_lock_irqsave(&ctx->jpeg->slock, flags); switch (ctrl->id) { case V4L2_CID_JPEG_COMPRESSION_QUALITY: ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val; break; case V4L2_CID_JPEG_RESTART_INTERVAL: ctx->restart_interval = ctrl->val; break; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: ctx->subsampling = ctrl->val; break; } spin_unlock_irqrestore(&ctx->jpeg->slock, flags); return 0; } static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = { .g_volatile_ctrl = s5p_jpeg_g_volatile_ctrl, .s_ctrl = s5p_jpeg_s_ctrl, }; static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx) { unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */ struct v4l2_ctrl *ctrl; v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3); if (ctx->mode == S5P_JPEG_ENCODE) { v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 3, 1, 3); v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL, 0, 3, 0xffff, 0); mask = ~0x06; /* 422, 420 */ } ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, V4L2_CID_JPEG_CHROMA_SUBSAMPLING, V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask, V4L2_JPEG_CHROMA_SUBSAMPLING_422); if (ctx->ctrl_handler.error) return ctx->ctrl_handler.error; if (ctx->mode == S5P_JPEG_DECODE) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; return 0; } static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = { .vidioc_querycap = s5p_jpeg_querycap, .vidioc_enum_fmt_vid_cap = s5p_jpeg_enum_fmt_vid_cap, .vidioc_enum_fmt_vid_out = 
s5p_jpeg_enum_fmt_vid_out, .vidioc_g_fmt_vid_cap = s5p_jpeg_g_fmt, .vidioc_g_fmt_vid_out = s5p_jpeg_g_fmt, .vidioc_try_fmt_vid_cap = s5p_jpeg_try_fmt_vid_cap, .vidioc_try_fmt_vid_out = s5p_jpeg_try_fmt_vid_out, .vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap, .vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out, .vidioc_reqbufs = s5p_jpeg_reqbufs, .vidioc_querybuf = s5p_jpeg_querybuf, .vidioc_qbuf = s5p_jpeg_qbuf, .vidioc_dqbuf = s5p_jpeg_dqbuf, .vidioc_streamon = s5p_jpeg_streamon, .vidioc_streamoff = s5p_jpeg_streamoff, .vidioc_g_selection = s5p_jpeg_g_selection, }; /* * ============================================================================ * mem2mem callbacks * ============================================================================ */ static void s5p_jpeg_device_run(void *priv) { struct s5p_jpeg_ctx *ctx = priv; struct s5p_jpeg *jpeg = ctx->jpeg; struct vb2_buffer *src_buf, *dst_buf; unsigned long src_addr, dst_addr; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); jpeg_reset(jpeg->regs); jpeg_poweron(jpeg->regs); jpeg_proc_mode(jpeg->regs, ctx->mode); if (ctx->mode == S5P_JPEG_ENCODE) { if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565) jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565); else jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422); jpeg_subsampling_mode(jpeg->regs, ctx->subsampling); jpeg_dri(jpeg->regs, ctx->restart_interval); jpeg_x(jpeg->regs, ctx->out_q.w); jpeg_y(jpeg->regs, ctx->out_q.h); jpeg_imgadr(jpeg->regs, src_addr); jpeg_jpgadr(jpeg->regs, dst_addr); /* ultimately comes from sizeimage from userspace */ jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size); /* JPEG RGB to YCbCr conversion matrix */ jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11); jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12); jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13); jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21); 
jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22); jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23); jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31); jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32); jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33); /* * JPEG IP allows storing 4 quantization tables * We fill table 0 for luma and table 1 for chroma */ jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality); jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality); /* use table 0 for Y */ jpeg_qtbl(jpeg->regs, 1, 0); /* use table 1 for Cb and Cr*/ jpeg_qtbl(jpeg->regs, 2, 1); jpeg_qtbl(jpeg->regs, 3, 1); /* Y, Cb, Cr use Huffman table 0 */ jpeg_htbl_ac(jpeg->regs, 1); jpeg_htbl_dc(jpeg->regs, 1); jpeg_htbl_ac(jpeg->regs, 2); jpeg_htbl_dc(jpeg->regs, 2); jpeg_htbl_ac(jpeg->regs, 3); jpeg_htbl_dc(jpeg->regs, 3); } else { /* S5P_JPEG_DECODE */ jpeg_rst_int_enable(jpeg->regs, true); jpeg_data_num_int_enable(jpeg->regs, true); jpeg_final_mcu_num_int_enable(jpeg->regs, true); if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV) jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422); else jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420); jpeg_jpgadr(jpeg->regs, src_addr); jpeg_imgadr(jpeg->regs, dst_addr); } jpeg_start(jpeg->regs); } static int s5p_jpeg_job_ready(void *priv) { struct s5p_jpeg_ctx *ctx = priv; if (ctx->mode == S5P_JPEG_DECODE) return ctx->hdr_parsed; return 1; } static void s5p_jpeg_job_abort(void *priv) { } static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = { .device_run = s5p_jpeg_device_run, .job_ready = s5p_jpeg_job_ready, .job_abort = s5p_jpeg_job_abort, }; /* * ============================================================================ * Queue operations * ============================================================================ */ static int s5p_jpeg_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); struct s5p_jpeg_q_data *q_data = NULL; 
unsigned int size, count = *nbuffers; q_data = get_q_data(ctx, vq->type); BUG_ON(q_data == NULL); size = q_data->size; /* * header is parsed during decoding and parsed information stored * in the context so we do not allow another buffer to overwrite it */ if (ctx->mode == S5P_JPEG_DECODE) count = 1; *nbuffers = count; *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = ctx->jpeg->alloc_ctx; return 0; } static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct s5p_jpeg_q_data *q_data = NULL; q_data = get_q_data(ctx, vb->vb2_queue->type); BUG_ON(q_data == NULL); if (vb2_plane_size(vb, 0) < q_data->size) { pr_err("%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->size); return -EINVAL; } vb2_set_plane_payload(vb, 0, q_data->size); return 0; } static void s5p_jpeg_buf_queue(struct vb2_buffer *vb) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); if (ctx->mode == S5P_JPEG_DECODE && vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { struct s5p_jpeg_q_data tmp, *q_data; ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp, (unsigned long)vb2_plane_vaddr(vb, 0), min((unsigned long)ctx->out_q.size, vb2_get_plane_payload(vb, 0))); if (!ctx->hdr_parsed) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); return; } q_data = &ctx->out_q; q_data->w = tmp.w; q_data->h = tmp.h; q_data = &ctx->cap_q; q_data->w = tmp.w; q_data->h = tmp.h; jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH, S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align, &q_data->h, S5P_JPEG_MIN_HEIGHT, S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align ); q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3; } if (ctx->m2m_ctx) v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); } static void s5p_jpeg_wait_prepare(struct vb2_queue *vq) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); mutex_unlock(&ctx->jpeg->lock); } static void s5p_jpeg_wait_finish(struct vb2_queue *vq) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); 
mutex_lock(&ctx->jpeg->lock); } static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); int ret; ret = pm_runtime_get_sync(ctx->jpeg->dev); return ret > 0 ? 0 : ret; } static int s5p_jpeg_stop_streaming(struct vb2_queue *q) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); pm_runtime_put(ctx->jpeg->dev); return 0; } static struct vb2_ops s5p_jpeg_qops = { .queue_setup = s5p_jpeg_queue_setup, .buf_prepare = s5p_jpeg_buf_prepare, .buf_queue = s5p_jpeg_buf_queue, .wait_prepare = s5p_jpeg_wait_prepare, .wait_finish = s5p_jpeg_wait_finish, .start_streaming = s5p_jpeg_start_streaming, .stop_streaming = s5p_jpeg_stop_streaming, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct s5p_jpeg_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_USERPTR; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &s5p_jpeg_qops; src_vq->mem_ops = &vb2_dma_contig_memops; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &s5p_jpeg_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; return vb2_queue_init(dst_vq); } /* * ============================================================================ * ISR * ============================================================================ */ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id) { struct s5p_jpeg *jpeg = dev_id; struct s5p_jpeg_ctx *curr_ctx; struct vb2_buffer *src_buf, *dst_buf; unsigned long payload_size = 0; enum vb2_buffer_state state = VB2_BUF_STATE_DONE; bool enc_jpeg_too_large = false; bool timer_elapsed = false; bool op_completed = false; spin_lock(&jpeg->slock); 
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev); src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); if (curr_ctx->mode == S5P_JPEG_ENCODE) enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs); timer_elapsed = jpeg_timer_stat(jpeg->regs); op_completed = jpeg_result_stat_ok(jpeg->regs); if (curr_ctx->mode == S5P_JPEG_DECODE) op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs); if (enc_jpeg_too_large) { state = VB2_BUF_STATE_ERROR; jpeg_clear_enc_stream_stat(jpeg->regs); } else if (timer_elapsed) { state = VB2_BUF_STATE_ERROR; jpeg_clear_timer_stat(jpeg->regs); } else if (!op_completed) { state = VB2_BUF_STATE_ERROR; } else { payload_size = jpeg_compressed_size(jpeg->regs); } v4l2_m2m_buf_done(src_buf, state); if (curr_ctx->mode == S5P_JPEG_ENCODE) vb2_set_plane_payload(dst_buf, 0, payload_size); v4l2_m2m_buf_done(dst_buf, state); v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx); curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs); spin_unlock(&jpeg->slock); jpeg_clear_int(jpeg->regs); return IRQ_HANDLED; } /* * ============================================================================ * Driver basic infrastructure * ============================================================================ */ static int s5p_jpeg_probe(struct platform_device *pdev) { struct s5p_jpeg *jpeg; struct resource *res; int ret; /* JPEG IP abstraction struct */ jpeg = kzalloc(sizeof(struct s5p_jpeg), GFP_KERNEL); if (!jpeg) return -ENOMEM; mutex_init(&jpeg->lock); spin_lock_init(&jpeg->slock); jpeg->dev = &pdev->dev; /* memory-mapped registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto jpeg_alloc_rollback; } jpeg->ioarea = request_mem_region(res->start, resource_size(res), pdev->name); if (!jpeg->ioarea) { dev_err(&pdev->dev, "cannot request IO\n"); ret = -ENXIO; goto jpeg_alloc_rollback; } jpeg->regs = 
ioremap(res->start, resource_size(res)); if (!jpeg->regs) { dev_err(&pdev->dev, "cannot map IO\n"); ret = -ENXIO; goto mem_region_rollback; } dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", jpeg->regs, jpeg->ioarea, res); /* interrupt service routine registration */ jpeg->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); goto ioremap_rollback; } ret = request_irq(jpeg->irq, s5p_jpeg_irq, 0, dev_name(&pdev->dev), jpeg); if (ret) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq); goto ioremap_rollback; } /* clocks */ jpeg->clk = clk_get(&pdev->dev, "jpeg"); if (IS_ERR(jpeg->clk)) { dev_err(&pdev->dev, "cannot get clock\n"); ret = PTR_ERR(jpeg->clk); goto request_irq_rollback; } dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk); clk_enable(jpeg->clk); /* v4l2 device */ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Failed to register v4l2 device\n"); goto clk_get_rollback; } /* mem2mem device */ jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops); if (IS_ERR(jpeg->m2m_dev)) { v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(jpeg->m2m_dev); goto device_register_rollback; } jpeg->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(jpeg->alloc_ctx)) { v4l2_err(&jpeg->v4l2_dev, "Failed to init memory allocator\n"); ret = PTR_ERR(jpeg->alloc_ctx); goto m2m_init_rollback; } /* JPEG encoder /dev/videoX node */ jpeg->vfd_encoder = video_device_alloc(); if (!jpeg->vfd_encoder) { v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto vb2_allocator_rollback; } strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME, sizeof(jpeg->vfd_encoder->name)); jpeg->vfd_encoder->fops = &s5p_jpeg_fops; jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops; jpeg->vfd_encoder->minor = -1; jpeg->vfd_encoder->release = video_device_release; jpeg->vfd_encoder->lock = &jpeg->lock; jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev; ret = 
video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); goto enc_vdev_alloc_rollback; } video_set_drvdata(jpeg->vfd_encoder, jpeg); v4l2_info(&jpeg->v4l2_dev, "encoder device registered as /dev/video%d\n", jpeg->vfd_encoder->num); /* JPEG decoder /dev/videoX node */ jpeg->vfd_decoder = video_device_alloc(); if (!jpeg->vfd_decoder) { v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto enc_vdev_register_rollback; } strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME, sizeof(jpeg->vfd_decoder->name)); jpeg->vfd_decoder->fops = &s5p_jpeg_fops; jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops; jpeg->vfd_decoder->minor = -1; jpeg->vfd_decoder->release = video_device_release; jpeg->vfd_decoder->lock = &jpeg->lock; jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); goto dec_vdev_alloc_rollback; } video_set_drvdata(jpeg->vfd_decoder, jpeg); v4l2_info(&jpeg->v4l2_dev, "decoder device registered as /dev/video%d\n", jpeg->vfd_decoder->num); /* final statements & power management */ platform_set_drvdata(pdev, jpeg); pm_runtime_enable(&pdev->dev); v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n"); return 0; dec_vdev_alloc_rollback: video_device_release(jpeg->vfd_decoder); enc_vdev_register_rollback: video_unregister_device(jpeg->vfd_encoder); enc_vdev_alloc_rollback: video_device_release(jpeg->vfd_encoder); vb2_allocator_rollback: vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx); m2m_init_rollback: v4l2_m2m_release(jpeg->m2m_dev); device_register_rollback: v4l2_device_unregister(&jpeg->v4l2_dev); clk_get_rollback: clk_disable(jpeg->clk); clk_put(jpeg->clk); request_irq_rollback: free_irq(jpeg->irq, jpeg); ioremap_rollback: iounmap(jpeg->regs); mem_region_rollback: release_resource(jpeg->ioarea); 
	release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
	/* error unwind continues from s5p_jpeg_probe() above */
jpeg_alloc_rollback:
	kfree(jpeg);
	return ret;
}

/* Undo everything s5p_jpeg_probe() set up, in reverse order of acquisition. */
static int s5p_jpeg_remove(struct platform_device *pdev)
{
	struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);

	pm_runtime_disable(jpeg->dev);

	/* unregister both /dev/videoX nodes before tearing down m2m/v4l2 core */
	video_unregister_device(jpeg->vfd_decoder);
	video_device_release(jpeg->vfd_decoder);
	video_unregister_device(jpeg->vfd_encoder);
	video_device_release(jpeg->vfd_encoder);
	vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
	v4l2_m2m_release(jpeg->m2m_dev);
	v4l2_device_unregister(&jpeg->v4l2_dev);

	clk_disable(jpeg->clk);
	clk_put(jpeg->clk);

	free_irq(jpeg->irq, jpeg);

	iounmap(jpeg->regs);
	release_resource(jpeg->ioarea);
	release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
	kfree(jpeg);

	return 0;
}

/* Nothing to save across suspend: the Huffman tables are rewritten on resume. */
static int s5p_jpeg_runtime_suspend(struct device *dev)
{
	return 0;
}

static int s5p_jpeg_runtime_resume(struct device *dev)
{
	struct s5p_jpeg *jpeg = dev_get_drvdata(dev);

	/*
	 * JPEG IP allows storing two Huffman tables for each component
	 * We fill table 0 for each component
	 */
	jpeg_set_hdctbl(jpeg->regs);
	jpeg_set_hdctblg(jpeg->regs);
	jpeg_set_hactbl(jpeg->regs);
	jpeg_set_hactblg(jpeg->regs);

	return 0;
}

static const struct dev_pm_ops s5p_jpeg_pm_ops = {
	.runtime_suspend = s5p_jpeg_runtime_suspend,
	.runtime_resume = s5p_jpeg_runtime_resume,
};

static struct platform_driver s5p_jpeg_driver = {
	.probe = s5p_jpeg_probe,
	.remove = s5p_jpeg_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = S5P_JPEG_M2M_NAME,
		.pm = &s5p_jpeg_pm_ops,
	},
};

/* Module entry point: register the platform driver and log the banner. */
static int __init s5p_jpeg_register(void)
{
	int ret;

	pr_info("S5P JPEG V4L2 Driver, (c) 2011 Samsung Electronics\n");
	ret = platform_driver_register(&s5p_jpeg_driver);
	if (ret)
		pr_err("%s: failed to register jpeg driver\n", __func__);

	return ret;
}

static void __exit s5p_jpeg_unregister(void)
{
	platform_driver_unregister(&s5p_jpeg_driver);
}

module_init(s5p_jpeg_register);
module_exit(s5p_jpeg_unregister);

/* Module metadata (author tag is emitted just above). */
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
gpl-2.0
SyNtheticNightmar3/android_kernel_asus_flo
net/tipc/port.c
4803
33105
/* * net/tipc/port.c: TIPC port code * * Copyright (c) 1992-2007, Ericsson AB * Copyright (c) 2004-2008, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "config.h" #include "port.h" #include "name_table.h" /* Connection management: */ #define PROBING_INTERVAL 3600000 /* [ms] => 1 h */ #define CONFIRMED 0 #define PROBING 1 #define MAX_REJECT_SIZE 1024 static struct sk_buff *msg_queue_head; static struct sk_buff *msg_queue_tail; DEFINE_SPINLOCK(tipc_port_list_lock); static DEFINE_SPINLOCK(queue_lock); static LIST_HEAD(ports); static void port_handle_node_down(unsigned long ref); static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err); static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err); static void port_timeout(unsigned long ref); static u32 port_peernode(struct tipc_port *p_ptr) { return msg_destnode(&p_ptr->phdr); } static u32 port_peerport(struct tipc_port *p_ptr) { return msg_destport(&p_ptr->phdr); } /** * tipc_multicast - send a multicast message to local and remote destinations */ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 num_sect, struct iovec const *msg_sect, unsigned int total_len) { struct tipc_msg *hdr; struct sk_buff *buf; struct sk_buff *ibuf = NULL; struct tipc_port_list dports = {0, NULL, }; struct tipc_port *oport = tipc_port_deref(ref); int ext_targets; int res; if (unlikely(!oport)) return -EINVAL; /* Create multicast message */ hdr = &oport->phdr; msg_set_type(hdr, TIPC_MCAST_MSG); msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); msg_set_destport(hdr, 0); msg_set_destnode(hdr, 0); msg_set_nametype(hdr, seq->type); msg_set_namelower(hdr, seq->lower); msg_set_nameupper(hdr, seq->upper); msg_set_hdr_sz(hdr, MCAST_H_SIZE); res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE, !oport->user_port, &buf); if (unlikely(!buf)) return res; /* Figure out where to send multicast message */ ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, TIPC_NODE_SCOPE, &dports); /* Send message to destinations (duplicate it only if necessary) */ if (ext_targets) { if (dports.count != 
0) { ibuf = skb_copy(buf, GFP_ATOMIC); if (ibuf == NULL) { tipc_port_list_free(&dports); kfree_skb(buf); return -ENOMEM; } } res = tipc_bclink_send_msg(buf); if ((res < 0) && (dports.count != 0)) kfree_skb(ibuf); } else { ibuf = buf; } if (res >= 0) { if (ibuf) tipc_port_recv_mcast(ibuf, &dports); } else { tipc_port_list_free(&dports); } return res; } /** * tipc_port_recv_mcast - deliver multicast message to all destination ports * * If there is no port list, perform a lookup to create one */ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) { struct tipc_msg *msg; struct tipc_port_list dports = {0, NULL, }; struct tipc_port_list *item = dp; int cnt = 0; msg = buf_msg(buf); /* Create destination port list, if one wasn't supplied */ if (dp == NULL) { tipc_nametbl_mc_translate(msg_nametype(msg), msg_namelower(msg), msg_nameupper(msg), TIPC_CLUSTER_SCOPE, &dports); item = dp = &dports; } /* Deliver a copy of message to each destination port */ if (dp->count != 0) { msg_set_destnode(msg, tipc_own_addr); if (dp->count == 1) { msg_set_destport(msg, dp->ports[0]); tipc_port_recv_msg(buf); tipc_port_list_free(dp); return; } for (; cnt < dp->count; cnt++) { int index = cnt % PLSIZE; struct sk_buff *b = skb_clone(buf, GFP_ATOMIC); if (b == NULL) { warn("Unable to deliver multicast message(s)\n"); goto exit; } if ((index == 0) && (cnt != 0)) item = item->next; msg_set_destport(buf_msg(b), item->ports[index]); tipc_port_recv_msg(b); } } exit: kfree_skb(buf); tipc_port_list_free(dp); } /** * tipc_createport_raw - create a generic TIPC port * * Returns pointer to (locked) TIPC port, or NULL if unable to create it */ struct tipc_port *tipc_createport_raw(void *usr_handle, u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), void (*wakeup)(struct tipc_port *), const u32 importance) { struct tipc_port *p_ptr; struct tipc_msg *msg; u32 ref; p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); if (!p_ptr) { warn("Port creation failed, no memory\n"); return NULL; } 
ref = tipc_ref_acquire(p_ptr, &p_ptr->lock); if (!ref) { warn("Port creation failed, reference table exhausted\n"); kfree(p_ptr); return NULL; } p_ptr->usr_handle = usr_handle; p_ptr->max_pkt = MAX_PKT_DEFAULT; p_ptr->ref = ref; msg = &p_ptr->phdr; tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); msg_set_origport(msg, ref); INIT_LIST_HEAD(&p_ptr->wait_list); INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); p_ptr->dispatcher = dispatcher; p_ptr->wakeup = wakeup; p_ptr->user_port = NULL; k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); spin_lock_bh(&tipc_port_list_lock); INIT_LIST_HEAD(&p_ptr->publications); INIT_LIST_HEAD(&p_ptr->port_list); list_add_tail(&p_ptr->port_list, &ports); spin_unlock_bh(&tipc_port_list_lock); return p_ptr; } int tipc_deleteport(u32 ref) { struct tipc_port *p_ptr; struct sk_buff *buf = NULL; tipc_withdraw(ref, 0, NULL); p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; tipc_ref_discard(ref); tipc_port_unlock(p_ptr); k_cancel_timer(&p_ptr->timer); if (p_ptr->connected) { buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT); tipc_nodesub_unsubscribe(&p_ptr->subscription); } kfree(p_ptr->user_port); spin_lock_bh(&tipc_port_list_lock); list_del(&p_ptr->port_list); list_del(&p_ptr->wait_list); spin_unlock_bh(&tipc_port_list_lock); k_term_timer(&p_ptr->timer); kfree(p_ptr); tipc_net_route_msg(buf); return 0; } static int port_unreliable(struct tipc_port *p_ptr) { return msg_src_droppable(&p_ptr->phdr); } int tipc_portunreliable(u32 ref, unsigned int *isunreliable) { struct tipc_port *p_ptr; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; *isunreliable = port_unreliable(p_ptr); tipc_port_unlock(p_ptr); return 0; } int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) { struct tipc_port *p_ptr; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; msg_set_src_droppable(&p_ptr->phdr, (isunreliable != 0)); tipc_port_unlock(p_ptr); return 0; } static int port_unreturnable(struct tipc_port 
*p_ptr) { return msg_dest_droppable(&p_ptr->phdr); } int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable) { struct tipc_port *p_ptr; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; *isunrejectable = port_unreturnable(p_ptr); tipc_port_unlock(p_ptr); return 0; } int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) { struct tipc_port *p_ptr; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; msg_set_dest_droppable(&p_ptr->phdr, (isunrejectable != 0)); tipc_port_unlock(p_ptr); return 0; } /* * port_build_proto_msg(): create connection protocol message for port * * On entry the port must be locked and connected. */ static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr, u32 type, u32 ack) { struct sk_buff *buf; struct tipc_msg *msg; buf = tipc_buf_acquire(INT_H_SIZE); if (buf) { msg = buf_msg(buf); tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE, port_peernode(p_ptr)); msg_set_destport(msg, port_peerport(p_ptr)); msg_set_origport(msg, p_ptr->ref); msg_set_msgcnt(msg, ack); } return buf; } int tipc_reject_msg(struct sk_buff *buf, u32 err) { struct tipc_msg *msg = buf_msg(buf); struct sk_buff *rbuf; struct tipc_msg *rmsg; int hdr_sz; u32 imp; u32 data_sz = msg_data_sz(msg); u32 src_node; u32 rmsg_sz; /* discard rejected message if it shouldn't be returned to sender */ if (WARN(!msg_isdata(msg), "attempt to reject message with user=%u", msg_user(msg))) { dump_stack(); goto exit; } if (msg_errcode(msg) || msg_dest_droppable(msg)) goto exit; /* * construct returned message by copying rejected message header and * data (or subset), then updating header fields that need adjusting */ hdr_sz = msg_hdr_sz(msg); rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE); rbuf = tipc_buf_acquire(rmsg_sz); if (rbuf == NULL) goto exit; rmsg = buf_msg(rbuf); skb_copy_to_linear_data(rbuf, msg, rmsg_sz); if (msg_connected(rmsg)) { imp = msg_importance(rmsg); if (imp < TIPC_CRITICAL_IMPORTANCE) msg_set_importance(rmsg, ++imp); } 
msg_set_non_seq(rmsg, 0); msg_set_size(rmsg, rmsg_sz); msg_set_errcode(rmsg, err); msg_set_prevnode(rmsg, tipc_own_addr); msg_swap_words(rmsg, 4, 5); if (!msg_short(rmsg)) msg_swap_words(rmsg, 6, 7); /* send self-abort message when rejecting on a connected port */ if (msg_connected(msg)) { struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg)); if (p_ptr) { struct sk_buff *abuf = NULL; if (p_ptr->connected) abuf = port_build_self_abort_msg(p_ptr, err); tipc_port_unlock(p_ptr); tipc_net_route_msg(abuf); } } /* send returned message & dispose of rejected message */ src_node = msg_prevnode(msg); if (src_node == tipc_own_addr) tipc_port_recv_msg(rbuf); else tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); exit: kfree_skb(buf); return data_sz; } int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int err) { struct sk_buff *buf; int res; res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE, !p_ptr->user_port, &buf); if (!buf) return res; return tipc_reject_msg(buf, err); } static void port_timeout(unsigned long ref) { struct tipc_port *p_ptr = tipc_port_lock(ref); struct sk_buff *buf = NULL; if (!p_ptr) return; if (!p_ptr->connected) { tipc_port_unlock(p_ptr); return; } /* Last probe answered ? 
*/ if (p_ptr->probing_state == PROBING) { buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT); } else { buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0); p_ptr->probing_state = PROBING; k_start_timer(&p_ptr->timer, p_ptr->probing_interval); } tipc_port_unlock(p_ptr); tipc_net_route_msg(buf); } static void port_handle_node_down(unsigned long ref) { struct tipc_port *p_ptr = tipc_port_lock(ref); struct sk_buff *buf = NULL; if (!p_ptr) return; buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE); tipc_port_unlock(p_ptr); tipc_net_route_msg(buf); } static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err) { struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err); if (buf) { struct tipc_msg *msg = buf_msg(buf); msg_swap_words(msg, 4, 5); msg_swap_words(msg, 6, 7); } return buf; } static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err) { struct sk_buff *buf; struct tipc_msg *msg; u32 imp; if (!p_ptr->connected) return NULL; buf = tipc_buf_acquire(BASIC_H_SIZE); if (buf) { msg = buf_msg(buf); memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE); msg_set_hdr_sz(msg, BASIC_H_SIZE); msg_set_size(msg, BASIC_H_SIZE); imp = msg_importance(msg); if (imp < TIPC_CRITICAL_IMPORTANCE) msg_set_importance(msg, ++imp); msg_set_errcode(msg, err); } return buf; } void tipc_port_recv_proto_msg(struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); struct tipc_port *p_ptr; struct sk_buff *r_buf = NULL; u32 orignode = msg_orignode(msg); u32 origport = msg_origport(msg); u32 destport = msg_destport(msg); int wakeable; /* Validate connection */ p_ptr = tipc_port_lock(destport); if (!p_ptr || !p_ptr->connected || (port_peernode(p_ptr) != orignode) || (port_peerport(p_ptr) != origport)) { r_buf = tipc_buf_acquire(BASIC_H_SIZE); if (r_buf) { msg = buf_msg(r_buf); tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG, BASIC_H_SIZE, orignode); msg_set_errcode(msg, TIPC_ERR_NO_PORT); msg_set_origport(msg, destport); msg_set_destport(msg, 
origport); } if (p_ptr) tipc_port_unlock(p_ptr); goto exit; } /* Process protocol message sent by peer */ switch (msg_type(msg)) { case CONN_ACK: wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && p_ptr->wakeup; p_ptr->acked += msg_msgcnt(msg); if (!tipc_port_congested(p_ptr)) { p_ptr->congested = 0; if (wakeable) p_ptr->wakeup(p_ptr); } break; case CONN_PROBE: r_buf = port_build_proto_msg(p_ptr, CONN_PROBE_REPLY, 0); break; default: /* CONN_PROBE_REPLY or unrecognized - no action required */ break; } p_ptr->probing_state = CONFIRMED; tipc_port_unlock(p_ptr); exit: tipc_net_route_msg(r_buf); kfree_skb(buf); } static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id) { struct publication *publ; if (full_id) tipc_printf(buf, "<%u.%u.%u:%u>:", tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), tipc_node(tipc_own_addr), p_ptr->ref); else tipc_printf(buf, "%-10u:", p_ptr->ref); if (p_ptr->connected) { u32 dport = port_peerport(p_ptr); u32 destnode = port_peernode(p_ptr); tipc_printf(buf, " connected to <%u.%u.%u:%u>", tipc_zone(destnode), tipc_cluster(destnode), tipc_node(destnode), dport); if (p_ptr->conn_type != 0) tipc_printf(buf, " via {%u,%u}", p_ptr->conn_type, p_ptr->conn_instance); } else if (p_ptr->published) { tipc_printf(buf, " bound to"); list_for_each_entry(publ, &p_ptr->publications, pport_list) { if (publ->lower == publ->upper) tipc_printf(buf, " {%u,%u}", publ->type, publ->lower); else tipc_printf(buf, " {%u,%u,%u}", publ->type, publ->lower, publ->upper); } } tipc_printf(buf, "\n"); } #define MAX_PORT_QUERY 32768 struct sk_buff *tipc_port_get_ports(void) { struct sk_buff *buf; struct tlv_desc *rep_tlv; struct print_buf pb; struct tipc_port *p_ptr; int str_len; buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY)); if (!buf) return NULL; rep_tlv = (struct tlv_desc *)buf->data; tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY); spin_lock_bh(&tipc_port_list_lock); list_for_each_entry(p_ptr, &ports, port_list) 
{ spin_lock_bh(p_ptr->lock); port_print(p_ptr, &pb, 0); spin_unlock_bh(p_ptr->lock); } spin_unlock_bh(&tipc_port_list_lock); str_len = tipc_printbuf_validate(&pb); skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); return buf; } void tipc_port_reinit(void) { struct tipc_port *p_ptr; struct tipc_msg *msg; spin_lock_bh(&tipc_port_list_lock); list_for_each_entry(p_ptr, &ports, port_list) { msg = &p_ptr->phdr; if (msg_orignode(msg) == tipc_own_addr) break; msg_set_prevnode(msg, tipc_own_addr); msg_set_orignode(msg, tipc_own_addr); } spin_unlock_bh(&tipc_port_list_lock); } /* * port_dispatcher_sigh(): Signal handler for messages destinated * to the tipc_port interface. */ static void port_dispatcher_sigh(void *dummy) { struct sk_buff *buf; spin_lock_bh(&queue_lock); buf = msg_queue_head; msg_queue_head = NULL; spin_unlock_bh(&queue_lock); while (buf) { struct tipc_port *p_ptr; struct user_port *up_ptr; struct tipc_portid orig; struct tipc_name_seq dseq; void *usr_handle; int connected; int published; u32 message_type; struct sk_buff *next = buf->next; struct tipc_msg *msg = buf_msg(buf); u32 dref = msg_destport(msg); message_type = msg_type(msg); if (message_type > TIPC_DIRECT_MSG) goto reject; /* Unsupported message type */ p_ptr = tipc_port_lock(dref); if (!p_ptr) goto reject; /* Port deleted while msg in queue */ orig.ref = msg_origport(msg); orig.node = msg_orignode(msg); up_ptr = p_ptr->user_port; usr_handle = up_ptr->usr_handle; connected = p_ptr->connected; published = p_ptr->published; if (unlikely(msg_errcode(msg))) goto err; switch (message_type) { case TIPC_CONN_MSG:{ tipc_conn_msg_event cb = up_ptr->conn_msg_cb; u32 peer_port = port_peerport(p_ptr); u32 peer_node = port_peernode(p_ptr); u32 dsz; tipc_port_unlock(p_ptr); if (unlikely(!cb)) goto reject; if (unlikely(!connected)) { if (tipc_connect2port(dref, &orig)) goto reject; } else if ((msg_origport(msg) != peer_port) || (msg_orignode(msg) != peer_node)) goto reject; 
dsz = msg_data_sz(msg); if (unlikely(dsz && (++p_ptr->conn_unacked >= TIPC_FLOW_CONTROL_WIN))) tipc_acknowledge(dref, p_ptr->conn_unacked); skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), dsz); break; } case TIPC_DIRECT_MSG:{ tipc_msg_event cb = up_ptr->msg_cb; tipc_port_unlock(p_ptr); if (unlikely(!cb || connected)) goto reject; skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), msg_data_sz(msg), msg_importance(msg), &orig); break; } case TIPC_MCAST_MSG: case TIPC_NAMED_MSG:{ tipc_named_msg_event cb = up_ptr->named_msg_cb; tipc_port_unlock(p_ptr); if (unlikely(!cb || connected || !published)) goto reject; dseq.type = msg_nametype(msg); dseq.lower = msg_nameinst(msg); dseq.upper = (message_type == TIPC_NAMED_MSG) ? dseq.lower : msg_nameupper(msg); skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), msg_data_sz(msg), msg_importance(msg), &orig, &dseq); break; } } if (buf) kfree_skb(buf); buf = next; continue; err: switch (message_type) { case TIPC_CONN_MSG:{ tipc_conn_shutdown_event cb = up_ptr->conn_err_cb; u32 peer_port = port_peerport(p_ptr); u32 peer_node = port_peernode(p_ptr); tipc_port_unlock(p_ptr); if (!cb || !connected) break; if ((msg_origport(msg) != peer_port) || (msg_orignode(msg) != peer_node)) break; tipc_disconnect(dref); skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), msg_data_sz(msg), msg_errcode(msg)); break; } case TIPC_DIRECT_MSG:{ tipc_msg_err_event cb = up_ptr->err_cb; tipc_port_unlock(p_ptr); if (!cb || connected) break; skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), msg_data_sz(msg), msg_errcode(msg), &orig); break; } case TIPC_MCAST_MSG: case TIPC_NAMED_MSG:{ tipc_named_msg_err_event cb = up_ptr->named_err_cb; tipc_port_unlock(p_ptr); if (!cb || connected) break; dseq.type = msg_nametype(msg); dseq.lower = msg_nameinst(msg); dseq.upper = (message_type == TIPC_NAMED_MSG) ? 
dseq.lower : msg_nameupper(msg); skb_pull(buf, msg_hdr_sz(msg)); cb(usr_handle, dref, &buf, msg_data(msg), msg_data_sz(msg), msg_errcode(msg), &dseq); break; } } if (buf) kfree_skb(buf); buf = next; continue; reject: tipc_reject_msg(buf, TIPC_ERR_NO_PORT); buf = next; } } /* * port_dispatcher(): Dispatcher for messages destinated * to the tipc_port interface. Called with port locked. */ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) { buf->next = NULL; spin_lock_bh(&queue_lock); if (msg_queue_head) { msg_queue_tail->next = buf; msg_queue_tail = buf; } else { msg_queue_tail = msg_queue_head = buf; tipc_k_signal((Handler)port_dispatcher_sigh, 0); } spin_unlock_bh(&queue_lock); return 0; } /* * Wake up port after congestion: Called with port locked, * */ static void port_wakeup_sh(unsigned long ref) { struct tipc_port *p_ptr; struct user_port *up_ptr; tipc_continue_event cb = NULL; void *uh = NULL; p_ptr = tipc_port_lock(ref); if (p_ptr) { up_ptr = p_ptr->user_port; if (up_ptr) { cb = up_ptr->continue_event_cb; uh = up_ptr->usr_handle; } tipc_port_unlock(p_ptr); } if (cb) cb(uh, ref); } static void port_wakeup(struct tipc_port *p_ptr) { tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref); } void tipc_acknowledge(u32 ref, u32 ack) { struct tipc_port *p_ptr; struct sk_buff *buf = NULL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return; if (p_ptr->connected) { p_ptr->conn_unacked -= ack; buf = port_build_proto_msg(p_ptr, CONN_ACK, ack); } tipc_port_unlock(p_ptr); tipc_net_route_msg(buf); } /* * tipc_createport(): user level call. 
*/ int tipc_createport(void *usr_handle, unsigned int importance, tipc_msg_err_event error_cb, tipc_named_msg_err_event named_error_cb, tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb, tipc_named_msg_event named_msg_cb, tipc_conn_msg_event conn_msg_cb, tipc_continue_event continue_event_cb,/* May be zero */ u32 *portref) { struct user_port *up_ptr; struct tipc_port *p_ptr; up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); if (!up_ptr) { warn("Port creation failed, no memory\n"); return -ENOMEM; } p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); if (!p_ptr) { kfree(up_ptr); return -ENOMEM; } p_ptr->user_port = up_ptr; up_ptr->usr_handle = usr_handle; up_ptr->ref = p_ptr->ref; up_ptr->err_cb = error_cb; up_ptr->named_err_cb = named_error_cb; up_ptr->conn_err_cb = conn_error_cb; up_ptr->msg_cb = msg_cb; up_ptr->named_msg_cb = named_msg_cb; up_ptr->conn_msg_cb = conn_msg_cb; up_ptr->continue_event_cb = continue_event_cb; *portref = p_ptr->ref; tipc_port_unlock(p_ptr); return 0; } int tipc_portimportance(u32 ref, unsigned int *importance) { struct tipc_port *p_ptr; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; *importance = (unsigned int)msg_importance(&p_ptr->phdr); tipc_port_unlock(p_ptr); return 0; } int tipc_set_portimportance(u32 ref, unsigned int imp) { struct tipc_port *p_ptr; if (imp > TIPC_CRITICAL_IMPORTANCE) return -EINVAL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; msg_set_importance(&p_ptr->phdr, (u32)imp); tipc_port_unlock(p_ptr); return 0; } int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) { struct tipc_port *p_ptr; struct publication *publ; u32 key; int res = -EINVAL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; if (p_ptr->connected) goto exit; if (seq->lower > seq->upper) goto exit; if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE)) goto exit; key = ref + p_ptr->pub_count + 1; if (key == ref) { res = -EADDRINUSE; goto 
exit; } publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, scope, p_ptr->ref, key); if (publ) { list_add(&publ->pport_list, &p_ptr->publications); p_ptr->pub_count++; p_ptr->published = 1; res = 0; } exit: tipc_port_unlock(p_ptr); return res; } int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) { struct tipc_port *p_ptr; struct publication *publ; struct publication *tpubl; int res = -EINVAL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; if (!seq) { list_for_each_entry_safe(publ, tpubl, &p_ptr->publications, pport_list) { tipc_nametbl_withdraw(publ->type, publ->lower, publ->ref, publ->key); } res = 0; } else { list_for_each_entry_safe(publ, tpubl, &p_ptr->publications, pport_list) { if (publ->scope != scope) continue; if (publ->type != seq->type) continue; if (publ->lower != seq->lower) continue; if (publ->upper != seq->upper) break; tipc_nametbl_withdraw(publ->type, publ->lower, publ->ref, publ->key); res = 0; break; } } if (list_empty(&p_ptr->publications)) p_ptr->published = 0; tipc_port_unlock(p_ptr); return res; } int tipc_connect2port(u32 ref, struct tipc_portid const *peer) { struct tipc_port *p_ptr; struct tipc_msg *msg; int res = -EINVAL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; if (p_ptr->published || p_ptr->connected) goto exit; if (!peer->ref) goto exit; msg = &p_ptr->phdr; msg_set_destnode(msg, peer->node); msg_set_destport(msg, peer->ref); msg_set_type(msg, TIPC_CONN_MSG); msg_set_lookup_scope(msg, 0); msg_set_hdr_sz(msg, SHORT_H_SIZE); p_ptr->probing_interval = PROBING_INTERVAL; p_ptr->probing_state = CONFIRMED; p_ptr->connected = 1; k_start_timer(&p_ptr->timer, p_ptr->probing_interval); tipc_nodesub_subscribe(&p_ptr->subscription, peer->node, (void *)(unsigned long)ref, (net_ev_handler)port_handle_node_down); res = 0; exit: tipc_port_unlock(p_ptr); p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref); return res; } /** * tipc_disconnect_port - disconnect port from peer * * Port 
must be locked. */ int tipc_disconnect_port(struct tipc_port *tp_ptr) { int res; if (tp_ptr->connected) { tp_ptr->connected = 0; /* let timer expire on it's own to avoid deadlock! */ tipc_nodesub_unsubscribe( &((struct tipc_port *)tp_ptr)->subscription); res = 0; } else { res = -ENOTCONN; } return res; } /* * tipc_disconnect(): Disconnect port form peer. * This is a node local operation. */ int tipc_disconnect(u32 ref) { struct tipc_port *p_ptr; int res; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; res = tipc_disconnect_port((struct tipc_port *)p_ptr); tipc_port_unlock(p_ptr); return res; } /* * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect */ int tipc_shutdown(u32 ref) { struct tipc_port *p_ptr; struct sk_buff *buf = NULL; p_ptr = tipc_port_lock(ref); if (!p_ptr) return -EINVAL; buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN); tipc_port_unlock(p_ptr); tipc_net_route_msg(buf); return tipc_disconnect(ref); } /** * tipc_port_recv_msg - receive message from lower layer and deliver to port user */ int tipc_port_recv_msg(struct sk_buff *buf) { struct tipc_port *p_ptr; struct tipc_msg *msg = buf_msg(buf); u32 destport = msg_destport(msg); u32 dsz = msg_data_sz(msg); u32 err; /* forward unresolved named message */ if (unlikely(!destport)) { tipc_net_route_msg(buf); return dsz; } /* validate destination & pass to port, otherwise reject message */ p_ptr = tipc_port_lock(destport); if (likely(p_ptr)) { if (likely(p_ptr->connected)) { if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) || (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) || (unlikely(!msg_connected(msg)))) { err = TIPC_ERR_NO_PORT; tipc_port_unlock(p_ptr); goto reject; } } err = p_ptr->dispatcher(p_ptr, buf); tipc_port_unlock(p_ptr); if (likely(!err)) return dsz; } else { err = TIPC_ERR_NO_PORT; } reject: return tipc_reject_msg(buf, err); } /* * tipc_port_recv_sections(): Concatenate and deliver sectioned * message for this node. 
*/ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { struct sk_buff *buf; int res; res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE, !sender->user_port, &buf); if (likely(buf)) tipc_port_recv_msg(buf); return res; } /** * tipc_send - send message sections on connection */ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { struct tipc_port *p_ptr; u32 destnode; int res; p_ptr = tipc_port_deref(ref); if (!p_ptr || !p_ptr->connected) return -EINVAL; p_ptr->congested = 1; if (!tipc_port_congested(p_ptr)) { destnode = port_peernode(p_ptr); if (likely(destnode != tipc_own_addr)) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); else res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); if (likely(res != -ELINKCONG)) { p_ptr->congested = 0; if (res > 0) p_ptr->sent++; return res; } } if (port_unreliable(p_ptr)) { p_ptr->congested = 0; return total_len; } return -ELINKCONG; } /** * tipc_send2name - send message sections to port name */ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { struct tipc_port *p_ptr; struct tipc_msg *msg; u32 destnode = domain; u32 destport; int res; p_ptr = tipc_port_deref(ref); if (!p_ptr || p_ptr->connected) return -EINVAL; msg = &p_ptr->phdr; msg_set_type(msg, TIPC_NAMED_MSG); msg_set_hdr_sz(msg, NAMED_H_SIZE); msg_set_nametype(msg, name->type); msg_set_nameinst(msg, name->instance); msg_set_lookup_scope(msg, tipc_addr_scope(domain)); destport = tipc_nametbl_translate(name->type, name->instance, &destnode); msg_set_destnode(msg, destnode); msg_set_destport(msg, destport); if (likely(destport || destnode)) { if (likely(destnode == tipc_own_addr)) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); else 
res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; return res; } if (port_unreliable(p_ptr)) { return total_len; } return -ELINKCONG; } return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, total_len, TIPC_ERR_NO_NAME); } /** * tipc_send2port - send message sections to port identity */ int tipc_send2port(u32 ref, struct tipc_portid const *dest, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { struct tipc_port *p_ptr; struct tipc_msg *msg; int res; p_ptr = tipc_port_deref(ref); if (!p_ptr || p_ptr->connected) return -EINVAL; msg = &p_ptr->phdr; msg_set_type(msg, TIPC_DIRECT_MSG); msg_set_lookup_scope(msg, 0); msg_set_destnode(msg, dest->node); msg_set_destport(msg, dest->ref); msg_set_hdr_sz(msg, BASIC_H_SIZE); if (dest->node == tipc_own_addr) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); else res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, dest->node); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; return res; } if (port_unreliable(p_ptr)) { return total_len; } return -ELINKCONG; } /** * tipc_send_buf2port - send message buffer to port identity */ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, struct sk_buff *buf, unsigned int dsz) { struct tipc_port *p_ptr; struct tipc_msg *msg; int res; p_ptr = (struct tipc_port *)tipc_ref_deref(ref); if (!p_ptr || p_ptr->connected) return -EINVAL; msg = &p_ptr->phdr; msg_set_type(msg, TIPC_DIRECT_MSG); msg_set_destnode(msg, dest->node); msg_set_destport(msg, dest->ref); msg_set_hdr_sz(msg, BASIC_H_SIZE); msg_set_size(msg, BASIC_H_SIZE + dsz); if (skb_cow(buf, BASIC_H_SIZE)) return -ENOMEM; skb_push(buf, BASIC_H_SIZE); skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE); if (dest->node == tipc_own_addr) res = tipc_port_recv_msg(buf); else res = tipc_send_buf_fast(buf, dest->node); if (likely(res != -ELINKCONG)) { if (res > 
0) p_ptr->sent++; return res; } if (port_unreliable(p_ptr)) return dsz; return -ELINKCONG; }
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
drivers/media/video/s5p-jpeg/jpeg-core.c
4803
40496
/* linux/drivers/media/video/s5p-jpeg/jpeg-core.c
 *
 * Samsung S5P JPEG codec (mem2mem) driver core: format tables, fixed
 * quantization/Huffman tables and the V4L2 file operations.
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "jpeg-core.h"
#include "jpeg-hw.h"

/*
 * Pixel formats handled by the encoder instance: JPEG on the capture
 * (compressed) side, raw YUYV/RGB565 on the output (uncompressed) side.
 */
static struct s5p_jpeg_fmt formats_enc[] = {
	{
		.name		= "JPEG JFIF",
		.fourcc		= V4L2_PIX_FMT_JPEG,
		.colplanes	= 1,
		.types		= MEM2MEM_CAPTURE,
	},
	{
		.name		= "YUV 4:2:2 packed, YCbYCr",
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.depth		= 16,
		.colplanes	= 1,
		.types		= MEM2MEM_OUTPUT,
	},
	{
		.name		= "RGB565",
		.fourcc		= V4L2_PIX_FMT_RGB565,
		.depth		= 16,
		.colplanes	= 1,
		.types		= MEM2MEM_OUTPUT,
	},
};
#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)

/*
 * Pixel formats handled by the decoder instance: JPEG in on the output
 * side, raw YUV420/YUYV out on the capture side.  h_align/v_align are
 * log2 alignment requirements fed to jpeg_bound_align_image().
 */
static struct s5p_jpeg_fmt formats_dec[] = {
	{
		.name		= "YUV 4:2:0 planar, YCbCr",
		.fourcc		= V4L2_PIX_FMT_YUV420,
		.depth		= 12,
		.colplanes	= 3,
		.h_align	= 4,
		.v_align	= 4,
		.types		= MEM2MEM_CAPTURE,
	},
	{
		.name		= "YUV 4:2:2 packed, YCbYCr",
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.depth		= 16,
		.colplanes	= 1,
		.h_align	= 4,
		.v_align	= 3,
		.types		= MEM2MEM_CAPTURE,
	},
	{
		.name		= "JPEG JFIF",
		.fourcc		= V4L2_PIX_FMT_JPEG,
		.colplanes	= 1,
		.types		= MEM2MEM_OUTPUT,
	},
};
#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)

/*
 * Four fixed 8x8 luminance quantization tables, indexed by the
 * compression-quality control (0 = best quality ... 3 = worst).
 */
static const unsigned char qtbl_luminance[4][64] = {
	{/* level 1 - high quality */
		 8,  6,  6,  8, 12, 14, 16, 17,
		 6,  6,  6,  8, 10, 13, 12, 15,
		 6,  6,  7,  8, 13, 14, 18, 24,
		 8,  8,  8, 14, 13, 19, 24, 35,
		12, 10, 13, 13, 20, 26, 34, 39,
		14, 13, 14, 19, 26, 34, 39, 39,
		16, 12, 18, 24, 34, 39, 39, 39,
		17, 15, 24, 35, 39, 39, 39, 39
	},
	{/* level 2 */
		12,  8,  8, 12, 17, 21, 24, 23,
		 8,  9,  9, 11, 15, 19, 18, 23,
		 8,  9, 10, 12, 19, 20, 27, 36,
		12, 11, 12, 21, 20, 28, 36, 53,
		17, 15, 19, 20, 30, 39, 51, 59,
		21, 19, 20, 28, 39, 51, 59, 59,
		24, 18, 27, 36, 51, 59, 59, 59,
		23, 23, 36, 53, 59, 59, 59, 59
	},
	{/* level 3 */
		16, 11, 11, 16, 23, 27, 31, 30,
		11, 12, 12, 15, 20, 23, 23, 30,
		11, 12, 13, 16, 23, 26, 35, 47,
		16, 15, 16, 23, 26, 37, 47, 64,
		23, 20, 23, 26, 39, 51, 64, 64,
		27, 23, 26, 37, 51, 64, 64, 64,
		31, 23, 35, 47, 64, 64, 64, 64,
		30, 30, 47, 64, 64, 64, 64, 64
	},
	{/*level 4 - low quality */
		20, 16, 25, 39, 50, 46, 62, 68,
		16, 18, 23, 38, 38, 53, 65, 68,
		25, 23, 31, 38, 53, 65, 68, 68,
		39, 38, 38, 53, 65, 68, 68, 68,
		50, 38, 53, 65, 68, 68, 68, 68,
		46, 53, 65, 68, 68, 68, 68, 68,
		62, 65, 68, 68, 68, 68, 68, 68,
		68, 68, 68, 68, 68, 68, 68, 68
	}
};

/* Chrominance quantization tables, same quality indexing as above. */
static const unsigned char qtbl_chrominance[4][64] = {
	{/* level 1 - high quality */
		 9,  8,  9, 11, 14, 17, 19, 24,
		 8, 10,  9, 11, 14, 13, 17, 22,
		 9,  9, 13, 14, 13, 15, 23, 26,
		11, 11, 14, 14, 15, 20, 26, 33,
		14, 14, 13, 15, 20, 24, 33, 39,
		17, 13, 15, 20, 24, 32, 39, 39,
		19, 17, 23, 26, 33, 39, 39, 39,
		24, 22, 26, 33, 39, 39, 39, 39
	},
	{/* level 2 */
		13, 11, 13, 16, 20, 20, 29, 37,
		11, 14, 14, 14, 16, 20, 26, 32,
		13, 14, 15, 17, 20, 23, 35, 40,
		16, 14, 17, 21, 23, 30, 40, 50,
		20, 16, 20, 23, 30, 37, 50, 59,
		20, 20, 23, 30, 37, 48, 59, 59,
		29, 26, 35, 40, 50, 59, 59, 59,
		37, 32, 40, 50, 59, 59, 59, 59
	},
	{/* level 3 */
		17, 15, 17, 21, 20, 26, 38, 48,
		15, 19, 18, 17, 20, 26, 35, 43,
		17, 18, 20, 22, 26, 30, 46, 53,
		21, 17, 22, 28, 30, 39, 53, 64,
		20, 20, 26, 30, 39, 48, 64, 64,
		26, 26, 30, 39, 48, 63, 64, 64,
		38, 35, 46, 53, 64, 64, 64, 64,
		48, 43, 53, 64, 64, 64, 64, 64
	},
	{/*level 4 - low quality */
		21, 25, 32, 38, 54, 68, 68, 68,
		25, 28, 24, 38, 54, 68, 68, 68,
		32, 24, 32, 43, 66, 68, 68, 68,
		38, 38, 43, 53, 68, 68, 68, 68,
		54, 54, 66, 68, 68, 68, 68, 68,
		68, 68, 68, 68, 68, 68, 68, 68,
		68, 68, 68, 68, 68, 68, 68, 68,
		68, 68, 68, 68, 68, 68, 68, 68
	}
};

/* Standard JFIF DC Huffman code-length counts (luminance, table 0). */
static const unsigned char hdctbl0[16] = {
	0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
};

/* Standard JFIF DC Huffman symbol values for table 0. */
static const unsigned char hdctblg0[12] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb
};

/* Standard JFIF AC Huffman code-length counts (luminance, table 0). */
static const unsigned char hactbl0[16] = {
	0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d
};

/* Standard JFIF AC Huffman symbol values for table 0 (162 entries). */
static const unsigned char hactblg0[162] = {
	0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
	0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
	0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
	0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
	0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
	0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
	0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
	0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
	0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
	0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
	0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
	0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
	0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
	0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
	0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
	0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
	0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
	0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
	0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
	0xf9, 0xfa
};

/* Resolve the per-file-handle context from an embedded v4l2_ctrl handler. */
static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
	return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
}

/* Resolve the per-file-handle context from an embedded v4l2_fh. */
static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
	return container_of(fh, struct s5p_jpeg_ctx, fh);
}

/* Write a quantization table into the IP, one byte per 32-bit register. */
static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
		   unsigned long tab, int len)
{
	int i;

	for (i = 0; i < len; i++)
		writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
}

static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
{
	/* this driver fills quantisation table 0 with data for luma */
	jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
		      ARRAY_SIZE(qtbl_luminance[quality]));
}

static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
{
	/* this driver fills quantisation table 1 with data for chroma */
	jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
		      ARRAY_SIZE(qtbl_chrominance[quality]));
}

/* Write a Huffman table into the IP, one byte per 32-bit register. */
static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
		   unsigned long tab, int len)
{
	int i;

	for (i = 0; i < len; i++)
		writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
}

static inline void jpeg_set_hdctbl(void __iomem *regs)
{
	/* this driver fills table 0 for this component */
	jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
}

static inline void jpeg_set_hdctblg(void __iomem *regs)
{
	/* this driver fills table 0 for this component */
	jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
}

static inline void jpeg_set_hactbl(void __iomem *regs)
{
	/* this driver fills table 0 for this component */
	jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
}

static inline void jpeg_set_hactblg(void __iomem *regs)
{
	/* this driver fills table 0 for this component */
	jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
}

/*
 * ============================================================================
 * Device file operations
 * ============================================================================
 */

static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq);
static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
						 __u32 pixelformat);
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);

/*
 * Open handler: allocates a per-file-handle context, selects encode or
 * decode mode based on which video node was opened, and sets default
 * formats for both queues.
 */
static int s5p_jpeg_open(struct file *file)
{
	struct s5p_jpeg *jpeg = video_drvdata(file);
	struct video_device *vfd = video_devdata(file);
	struct s5p_jpeg_ctx *ctx;
	struct s5p_jpeg_fmt *out_fmt;
	int ret = 0;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, vfd);
	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->jpeg = jpeg;
	/* The encoder and decoder register separate video nodes. */
	if (vfd == jpeg->vfd_encoder) {
		ctx->mode = S5P_JPEG_ENCODE;
		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
	} else {
		ctx->mode = S5P_JPEG_DECODE;
		out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
	}

	ret = s5p_jpeg_controls_create(ctx);
	if (ret < 0)
		goto error;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error;
	}

	ctx->out_q.fmt = out_fmt;
	ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);

	return 0;

error:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return ret;
}

/* Release handler: tears down the m2m context, controls and the context. */
static int s5p_jpeg_release(struct file *file)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}

/* poll/mmap are thin delegations to the mem2mem framework. */
static unsigned int s5p_jpeg_poll(struct file *file,
				  struct poll_table_struct *wait)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);

	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}

static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);

	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}

static const struct v4l2_file_operations s5p_jpeg_fops = {
	.owner		= THIS_MODULE,
	.open		= s5p_jpeg_open,
	.release	= s5p_jpeg_release,
	.poll		= s5p_jpeg_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= s5p_jpeg_mmap,
};

/*
 * ============================================================================
 * video ioctl operations
 * ============================================================================
 */

/* Return the next byte of the in-memory JPEG stream, or -1 at the end. */
static int get_byte(struct s5p_jpeg_buffer *buf)
{
	if (buf->curr >= buf->size)
		return -1;

	return ((unsigned char *)buf->data)[buf->curr++];
}
/* Read a big-endian 16-bit word from the stream; returns -1 on EOF. */
static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
{
	unsigned int temp;
	int byte;

	byte = get_byte(buf);
	if (byte == -1)
		return -1;
	temp = byte << 8;
	byte = get_byte(buf);
	if (byte == -1)
		return -1;
	*word = (unsigned int)byte | temp;
	return 0;
}

/* Advance the stream position by len bytes (no-op for len <= 0). */
static void skip(struct s5p_jpeg_buffer *buf, long len)
{
	if (len <= 0)
		return;

	while (len--)
		get_byte(buf);
}

/*
 * Scan a JPEG bitstream for the SOF0 (baseline) marker and extract the
 * image width, height and component count into *result.  Returns true
 * iff SOF0 was found before the end of the buffer.
 *
 * NOTE(review): if SOF0 is never found, width/height/components are
 * written to *result while still uninitialized; callers only use the
 * result when this returns true, but initializing them would be safer —
 * verify before relying on *result after a false return.
 */
static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
			       unsigned long buffer, unsigned long size)
{
	int c, components, notfound;
	unsigned int height, width, word;
	long length;
	struct s5p_jpeg_buffer jpeg_buffer;

	jpeg_buffer.size = size;
	jpeg_buffer.data = buffer;
	jpeg_buffer.curr = 0;

	notfound = 1;
	while (notfound) {
		c = get_byte(&jpeg_buffer);
		if (c == -1)
			break;
		/* markers are introduced by 0xff; anything else is payload */
		if (c != 0xff)
			continue;
		/* 0xff fill bytes may precede the actual marker code */
		do
			c = get_byte(&jpeg_buffer);
		while (c == 0xff);
		if (c == -1)
			break;
		/* 0xff00 is a stuffed data byte, not a marker */
		if (c == 0)
			continue;
		length = 0;
		switch (c) {
		/* SOF0: baseline JPEG */
		case SOF0:
			if (get_word_be(&jpeg_buffer, &word))
				break;
			if (get_byte(&jpeg_buffer) == -1)
				break;
			if (get_word_be(&jpeg_buffer, &height))
				break;
			if (get_word_be(&jpeg_buffer, &width))
				break;
			components = get_byte(&jpeg_buffer);
			if (components == -1)
				break;
			notfound = 0;

			/* 3 bytes of per-component data follow */
			skip(&jpeg_buffer, components * 3);
			break;

		/* skip payload-less markers */
		case RST ... RST + 7:
		case SOI:
		case EOI:
		case TEM:
			break;

		/* skip uninteresting payload markers */
		default:
			if (get_word_be(&jpeg_buffer, &word))
				break;
			/* segment length includes the 2 length bytes */
			length = (long)word - 2;
			skip(&jpeg_buffer, length);
			break;
		}
	}
	result->w = width;
	result->h = height;
	result->size = components;
	return !notfound;
}

/* QUERYCAP: report driver/card name according to encode/decode node. */
static int s5p_jpeg_querycap(struct file *file, void *priv,
			     struct v4l2_capability *cap)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	if (ctx->mode == S5P_JPEG_ENCODE) {
		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder",
			sizeof(cap->driver));
		strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
			sizeof(cap->card));
	} else {
		strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder",
			sizeof(cap->driver));
		strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
			sizeof(cap->card));
	}
	cap->bus_info[0] = 0;
	cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
			    V4L2_CAP_VIDEO_OUTPUT;
	return 0;
}

/*
 * Shared ENUM_FMT helper: returns the f->index-th entry of the given
 * format table that matches the requested queue type.
 */
static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
		    struct v4l2_fmtdesc *f, u32 type)
{
	int i, num = 0;

	for (i = 0; i < n; ++i) {
		if (formats[i].types & type) {
			/* index-th format of type type found ? */
			if (num == f->index)
				break;
			/* Correct type but haven't reached our index yet,
			 * just increment per-type index */
			++num;
		}
	}

	/* Format not found */
	if (i >= n)
		return -EINVAL;

	strlcpy(f->description, formats[i].name, sizeof(f->description));
	f->pixelformat = formats[i].fourcc;

	return 0;
}

static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_fmtdesc *f)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	if (ctx->mode == S5P_JPEG_ENCODE)
		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
				MEM2MEM_CAPTURE);

	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
}

static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
				     struct v4l2_fmtdesc *f)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	if (ctx->mode == S5P_JPEG_ENCODE)
		return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
				MEM2MEM_OUTPUT);

	return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
}

/* Map a V4L2 buffer type to the matching per-context queue state. */
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
					  enum v4l2_buf_type type)
{
	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return &ctx->out_q;
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return &ctx->cap_q;

	return NULL;
}

/*
 * G_FMT: report the currently configured format.  For the decoder the
 * capture format is only valid once a JPEG header has been parsed.
 */
static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct vb2_queue *vq;
	struct s5p_jpeg_q_data *q_data = NULL;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);

	vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed)
		return -EINVAL;
	q_data = get_q_data(ct, f->type);
	BUG_ON(q_data == NULL);

	pix->width = q_data->w;
	pix->height = q_data->h;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->bytesperline = 0;
	if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
		u32 bpl = q_data->w;
		/* packed formats: line length in bytes = pixels * depth / 8 */
		if (q_data->fmt->colplanes == 1)
			bpl = (bpl * q_data->fmt->depth) >> 3;
		pix->bytesperline = bpl;
	}
	pix->sizeimage = q_data->size;

	return 0;
}

static struct s5p_jpeg_fmt
*s5p_jpeg_find_format(unsigned int mode, u32 pixelformat) { unsigned int k; struct s5p_jpeg_fmt *formats; int n; if (mode == S5P_JPEG_ENCODE) { formats = formats_enc; n = NUM_FORMATS_ENC; } else { formats = formats_dec; n = NUM_FORMATS_DEC; } for (k = 0; k < n; k++) { struct s5p_jpeg_fmt *fmt = &formats[k]; if (fmt->fourcc == pixelformat) return fmt; } return NULL; } static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, unsigned int walign, u32 *h, unsigned int hmin, unsigned int hmax, unsigned int halign) { int width, height, w_step, h_step; width = *w; height = *h; w_step = 1 << walign; h_step = 1 << halign; v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0); if (*w < width && (*w + w_step) < wmax) *w += w_step; if (*h < height && (*h + h_step) < hmax) *h += h_step; } static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt, struct s5p_jpeg_ctx *ctx, int q_type) { struct v4l2_pix_format *pix = &f->fmt.pix; if (pix->field == V4L2_FIELD_ANY) pix->field = V4L2_FIELD_NONE; else if (pix->field != V4L2_FIELD_NONE) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ if (q_type == MEM2MEM_OUTPUT) jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH, S5P_JPEG_MAX_WIDTH, 0, &pix->height, S5P_JPEG_MIN_HEIGHT, S5P_JPEG_MAX_HEIGHT, 0); else jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH, S5P_JPEG_MAX_WIDTH, fmt->h_align, &pix->height, S5P_JPEG_MIN_HEIGHT, S5P_JPEG_MAX_HEIGHT, fmt->v_align); if (fmt->fourcc == V4L2_PIX_FMT_JPEG) { if (pix->sizeimage <= 0) pix->sizeimage = PAGE_SIZE; pix->bytesperline = 0; } else { u32 bpl = pix->bytesperline; if (fmt->colplanes > 1 && bpl < pix->width) bpl = pix->width; /* planar */ if (fmt->colplanes == 1 && /* packed */ (bpl << 3) * fmt->depth < pix->width) bpl = (pix->width * fmt->depth) >> 3; pix->bytesperline = bpl; pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3; } return 0; 
} static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); struct s5p_jpeg_fmt *fmt; fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat); if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->jpeg->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE); } static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); struct s5p_jpeg_fmt *fmt; fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat); if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->jpeg->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT); } static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f) { struct vb2_queue *vq; struct s5p_jpeg_q_data *q_data = NULL; struct v4l2_pix_format *pix = &f->fmt.pix; vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ct, f->type); BUG_ON(q_data == NULL); if (vb2_is_busy(vq)) { v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat); q_data->w = pix->width; q_data->h = pix->height; if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3; else q_data->size = pix->sizeimage; return 0; } static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return s5p_jpeg_s_fmt(fh_to_ctx(priv), f); } static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = s5p_jpeg_try_fmt_vid_out(file, priv, f); if (ret) return ret; return s5p_jpeg_s_fmt(fh_to_ctx(priv), f); } static int 
s5p_jpeg_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

/* The buffer ioctls below are thin delegations to the m2m framework. */
static int s5p_jpeg_querybuf(struct file *file, void *priv,
			     struct v4l2_buffer *buf)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int s5p_jpeg_dqbuf(struct file *file, void *priv,
			  struct v4l2_buffer *buf)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

static int s5p_jpeg_streamon(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int s5p_jpeg_streamoff(struct file *file, void *priv,
			      enum v4l2_buf_type type)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}

/*
 * G_SELECTION: the hardware does no cropping/scaling, so all crop
 * targets report the source (out_q) size and the compose bounds report
 * the destination (cap_q) size.
 */
int s5p_jpeg_g_selection(struct file *file, void *priv,
			 struct v4l2_selection *s)
{
	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* For JPEG blob active == default == bounds */
	switch (s->target) {
	case V4L2_SEL_TGT_CROP_ACTIVE:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_ACTIVE:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		s->r.width = ctx->out_q.w;
		s->r.height = ctx->out_q.h;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.width = ctx->cap_q.w;
		s->r.height = ctx->cap_q.h;
		break;
	default:
		return -EINVAL;
	}
	s->r.left = 0;
	s->r.top = 0;
	return 0;
}

/*
 * V4L2 controls
 */

/*
 * Volatile read of the decoder's detected chroma subsampling; the value
 * is written by the ISR, hence the slock protection.
 */
static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
	struct s5p_jpeg *jpeg = ctx->jpeg;
	unsigned long flags;

	switch (ctrl->id) {
	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
		spin_lock_irqsave(&jpeg->slock, flags);

		WARN_ON(ctx->subsampling > S5P_SUBSAMPLING_MODE_GRAY);
		/* modes above 2 are all reported as grayscale */
		if (ctx->subsampling > 2)
			ctrl->val = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
		else
			ctrl->val = ctx->subsampling;
		spin_unlock_irqrestore(&jpeg->slock, flags);
		break;
	}

	return 0;
}

static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
	unsigned long flags;

	spin_lock_irqsave(&ctx->jpeg->slock, flags);

	switch (ctrl->id) {
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		/* control is 0..3 best->worst; hw table index is inverted */
		ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - ctrl->val;
		break;
	case V4L2_CID_JPEG_RESTART_INTERVAL:
		ctx->restart_interval = ctrl->val;
		break;
	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
		ctx->subsampling = ctrl->val;
		break;
	}

	spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
	return 0;
}

static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
	.g_volatile_ctrl	= s5p_jpeg_g_volatile_ctrl,
	.s_ctrl			= s5p_jpeg_s_ctrl,
};

/*
 * Register the per-context controls: quality and restart interval for
 * the encoder, chroma subsampling for both (read-only/volatile when
 * decoding, since the value then comes from the parsed stream).
 */
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
{
	unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
	struct v4l2_ctrl *ctrl;

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);

	if (ctx->mode == S5P_JPEG_ENCODE) {
		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
				  V4L2_CID_JPEG_COMPRESSION_QUALITY,
				  0, 3, 1, 3);

		v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
				  V4L2_CID_JPEG_RESTART_INTERVAL,
				  0, 3, 0xffff, 0);
		mask = ~0x06; /* 422, 420 */
	}

	ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
				      V4L2_CID_JPEG_CHROMA_SUBSAMPLING,
				      V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
				      V4L2_JPEG_CHROMA_SUBSAMPLING_422);

	if (ctx->ctrl_handler.error)
		return ctx->ctrl_handler.error;

	if (ctx->mode == S5P_JPEG_DECODE)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
			V4L2_CTRL_FLAG_READ_ONLY;
	return 0;
}

static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
	.vidioc_querycap		= s5p_jpeg_querycap,

	.vidioc_enum_fmt_vid_cap	= s5p_jpeg_enum_fmt_vid_cap,
	.vidioc_enum_fmt_vid_out	= s5p_jpeg_enum_fmt_vid_out,

	.vidioc_g_fmt_vid_cap		= s5p_jpeg_g_fmt,
	.vidioc_g_fmt_vid_out		= s5p_jpeg_g_fmt,

	.vidioc_try_fmt_vid_cap		= s5p_jpeg_try_fmt_vid_cap,
	.vidioc_try_fmt_vid_out		= s5p_jpeg_try_fmt_vid_out,

	.vidioc_s_fmt_vid_cap		= s5p_jpeg_s_fmt_vid_cap,
	.vidioc_s_fmt_vid_out		= s5p_jpeg_s_fmt_vid_out,

	.vidioc_reqbufs			= s5p_jpeg_reqbufs,
	.vidioc_querybuf		= s5p_jpeg_querybuf,

	.vidioc_qbuf			= s5p_jpeg_qbuf,
	.vidioc_dqbuf			= s5p_jpeg_dqbuf,

	.vidioc_streamon		= s5p_jpeg_streamon,
	.vidioc_streamoff		= s5p_jpeg_streamoff,

	.vidioc_g_selection		= s5p_jpeg_g_selection,
};

/*
 * ============================================================================
 * mem2mem callbacks
 * ============================================================================
 */

/*
 * Program the hardware for one transaction and kick it off.  Runs with
 * src/dst buffers already guaranteed by the m2m framework.
 */
static void s5p_jpeg_device_run(void *priv)
{
	struct s5p_jpeg_ctx *ctx = priv;
	struct s5p_jpeg *jpeg = ctx->jpeg;
	struct vb2_buffer *src_buf, *dst_buf;
	unsigned long src_addr, dst_addr;

	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);

	jpeg_reset(jpeg->regs);
	jpeg_poweron(jpeg->regs);
	jpeg_proc_mode(jpeg->regs, ctx->mode);
	if (ctx->mode == S5P_JPEG_ENCODE) {
		if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
		else
			jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
		jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
		jpeg_dri(jpeg->regs, ctx->restart_interval);
		jpeg_x(jpeg->regs, ctx->out_q.w);
		jpeg_y(jpeg->regs, ctx->out_q.h);
		jpeg_imgadr(jpeg->regs, src_addr);
		jpeg_jpgadr(jpeg->regs, dst_addr);

		/* ultimately comes from sizeimage from userspace */
		jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);

		/* JPEG RGB to YCbCr conversion matrix */
		jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
		jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
		jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
		jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
		jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
		jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
		jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
		jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
		jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);

		/*
		 * JPEG IP allows storing 4 quantization tables
		 * We fill table 0 for luma and table 1 for chroma
		 */
		jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
		jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
		/* use table 0 for Y */
		jpeg_qtbl(jpeg->regs, 1, 0);
		/* use table 1 for Cb and Cr*/
		jpeg_qtbl(jpeg->regs, 2, 1);
		jpeg_qtbl(jpeg->regs, 3, 1);

		/* Y, Cb, Cr use Huffman table 0 */
		jpeg_htbl_ac(jpeg->regs, 1);
		jpeg_htbl_dc(jpeg->regs, 1);
		jpeg_htbl_ac(jpeg->regs, 2);
		jpeg_htbl_dc(jpeg->regs, 2);
		jpeg_htbl_ac(jpeg->regs, 3);
		jpeg_htbl_dc(jpeg->regs, 3);
	} else { /* S5P_JPEG_DECODE */
		jpeg_rst_int_enable(jpeg->regs, true);
		jpeg_data_num_int_enable(jpeg->regs, true);
		jpeg_final_mcu_num_int_enable(jpeg->regs, true);
		if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
		else
			jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
		jpeg_jpgadr(jpeg->regs, src_addr);
		jpeg_imgadr(jpeg->regs, dst_addr);
	}

	jpeg_start(jpeg->regs);
}

/* A decode job may only run once the JPEG header has been parsed. */
static int s5p_jpeg_job_ready(void *priv)
{
	struct s5p_jpeg_ctx *ctx = priv;

	if (ctx->mode == S5P_JPEG_DECODE)
		return ctx->hdr_parsed;
	return 1;
}

static void s5p_jpeg_job_abort(void *priv)
{
}

static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
	.device_run	= s5p_jpeg_device_run,
	.job_ready	= s5p_jpeg_job_ready,
	.job_abort	= s5p_jpeg_job_abort,
};

/*
 * ============================================================================
 * Queue operations
 * ============================================================================
 */

static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
				const struct v4l2_format *fmt,
				unsigned int *nbuffers, unsigned int *nplanes,
				unsigned int sizes[], void *alloc_ctxs[])
{
	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
	struct s5p_jpeg_q_data *q_data = NULL;
	unsigned int size, count = *nbuffers;

	q_data = get_q_data(ctx, vq->type);
	BUG_ON(q_data == NULL);

	size = q_data->size;

	/*
	 * header is parsed during decoding and parsed information stored
	 * in the context so we do not allow another buffer to overwrite it
	 */
	if (ctx->mode == S5P_JPEG_DECODE)
		count = 1;

	*nbuffers = count;
	*nplanes = 1;
	sizes[0] = size;
	alloc_ctxs[0] = ctx->jpeg->alloc_ctx;

	return 0;
}

static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
{
	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct s5p_jpeg_q_data *q_data = NULL;

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	BUG_ON(q_data == NULL);

	if (vb2_plane_size(vb, 0) < q_data->size) {
		pr_err("%s data will not fit into plane (%lu < %lu)\n",
				__func__, vb2_plane_size(vb, 0),
				(long)q_data->size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, q_data->size);

	return 0;
}

/*
 * On the decoder's output queue, parse the queued JPEG's header and
 * propagate the detected dimensions to both queues (aligned for the
 * capture format); the buffer is failed if no valid header is found.
 */
static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
{
	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (ctx->mode == S5P_JPEG_DECODE &&
	    vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		struct s5p_jpeg_q_data tmp, *q_data;

		ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
		     (unsigned long)vb2_plane_vaddr(vb, 0),
		     min((unsigned long)ctx->out_q.size,
			 vb2_get_plane_payload(vb, 0)));
		if (!ctx->hdr_parsed) {
			vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
			return;
		}

		q_data = &ctx->out_q;
		q_data->w = tmp.w;
		q_data->h = tmp.h;

		q_data = &ctx->cap_q;
		q_data->w = tmp.w;
		q_data->h = tmp.h;

		jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
				       S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
				       &q_data->h, S5P_JPEG_MIN_HEIGHT,
				       S5P_JPEG_MAX_HEIGHT,
				       q_data->fmt->v_align
				      );
		q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
	}

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}

/* Drop/retake the device lock around vb2's blocking waits. */
static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
{
	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);

	mutex_unlock(&ctx->jpeg->lock);
}

static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
{
	struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
mutex_lock(&ctx->jpeg->lock); } static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); int ret; ret = pm_runtime_get_sync(ctx->jpeg->dev); return ret > 0 ? 0 : ret; } static int s5p_jpeg_stop_streaming(struct vb2_queue *q) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); pm_runtime_put(ctx->jpeg->dev); return 0; } static struct vb2_ops s5p_jpeg_qops = { .queue_setup = s5p_jpeg_queue_setup, .buf_prepare = s5p_jpeg_buf_prepare, .buf_queue = s5p_jpeg_buf_queue, .wait_prepare = s5p_jpeg_wait_prepare, .wait_finish = s5p_jpeg_wait_finish, .start_streaming = s5p_jpeg_start_streaming, .stop_streaming = s5p_jpeg_stop_streaming, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct s5p_jpeg_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_USERPTR; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &s5p_jpeg_qops; src_vq->mem_ops = &vb2_dma_contig_memops; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &s5p_jpeg_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; return vb2_queue_init(dst_vq); } /* * ============================================================================ * ISR * ============================================================================ */ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id) { struct s5p_jpeg *jpeg = dev_id; struct s5p_jpeg_ctx *curr_ctx; struct vb2_buffer *src_buf, *dst_buf; unsigned long payload_size = 0; enum vb2_buffer_state state = VB2_BUF_STATE_DONE; bool enc_jpeg_too_large = false; bool timer_elapsed = false; bool op_completed = false; spin_lock(&jpeg->slock); 
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev); src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); if (curr_ctx->mode == S5P_JPEG_ENCODE) enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs); timer_elapsed = jpeg_timer_stat(jpeg->regs); op_completed = jpeg_result_stat_ok(jpeg->regs); if (curr_ctx->mode == S5P_JPEG_DECODE) op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs); if (enc_jpeg_too_large) { state = VB2_BUF_STATE_ERROR; jpeg_clear_enc_stream_stat(jpeg->regs); } else if (timer_elapsed) { state = VB2_BUF_STATE_ERROR; jpeg_clear_timer_stat(jpeg->regs); } else if (!op_completed) { state = VB2_BUF_STATE_ERROR; } else { payload_size = jpeg_compressed_size(jpeg->regs); } v4l2_m2m_buf_done(src_buf, state); if (curr_ctx->mode == S5P_JPEG_ENCODE) vb2_set_plane_payload(dst_buf, 0, payload_size); v4l2_m2m_buf_done(dst_buf, state); v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx); curr_ctx->subsampling = jpeg_get_subsampling_mode(jpeg->regs); spin_unlock(&jpeg->slock); jpeg_clear_int(jpeg->regs); return IRQ_HANDLED; } /* * ============================================================================ * Driver basic infrastructure * ============================================================================ */ static int s5p_jpeg_probe(struct platform_device *pdev) { struct s5p_jpeg *jpeg; struct resource *res; int ret; /* JPEG IP abstraction struct */ jpeg = kzalloc(sizeof(struct s5p_jpeg), GFP_KERNEL); if (!jpeg) return -ENOMEM; mutex_init(&jpeg->lock); spin_lock_init(&jpeg->slock); jpeg->dev = &pdev->dev; /* memory-mapped registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto jpeg_alloc_rollback; } jpeg->ioarea = request_mem_region(res->start, resource_size(res), pdev->name); if (!jpeg->ioarea) { dev_err(&pdev->dev, "cannot request IO\n"); ret = -ENXIO; goto jpeg_alloc_rollback; } jpeg->regs = 
ioremap(res->start, resource_size(res)); if (!jpeg->regs) { dev_err(&pdev->dev, "cannot map IO\n"); ret = -ENXIO; goto mem_region_rollback; } dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", jpeg->regs, jpeg->ioarea, res); /* interrupt service routine registration */ jpeg->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); goto ioremap_rollback; } ret = request_irq(jpeg->irq, s5p_jpeg_irq, 0, dev_name(&pdev->dev), jpeg); if (ret) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq); goto ioremap_rollback; } /* clocks */ jpeg->clk = clk_get(&pdev->dev, "jpeg"); if (IS_ERR(jpeg->clk)) { dev_err(&pdev->dev, "cannot get clock\n"); ret = PTR_ERR(jpeg->clk); goto request_irq_rollback; } dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk); clk_enable(jpeg->clk); /* v4l2 device */ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Failed to register v4l2 device\n"); goto clk_get_rollback; } /* mem2mem device */ jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops); if (IS_ERR(jpeg->m2m_dev)) { v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(jpeg->m2m_dev); goto device_register_rollback; } jpeg->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(jpeg->alloc_ctx)) { v4l2_err(&jpeg->v4l2_dev, "Failed to init memory allocator\n"); ret = PTR_ERR(jpeg->alloc_ctx); goto m2m_init_rollback; } /* JPEG encoder /dev/videoX node */ jpeg->vfd_encoder = video_device_alloc(); if (!jpeg->vfd_encoder) { v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto vb2_allocator_rollback; } strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME, sizeof(jpeg->vfd_encoder->name)); jpeg->vfd_encoder->fops = &s5p_jpeg_fops; jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops; jpeg->vfd_encoder->minor = -1; jpeg->vfd_encoder->release = video_device_release; jpeg->vfd_encoder->lock = &jpeg->lock; jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev; ret = 
video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); goto enc_vdev_alloc_rollback; } video_set_drvdata(jpeg->vfd_encoder, jpeg); v4l2_info(&jpeg->v4l2_dev, "encoder device registered as /dev/video%d\n", jpeg->vfd_encoder->num); /* JPEG decoder /dev/videoX node */ jpeg->vfd_decoder = video_device_alloc(); if (!jpeg->vfd_decoder) { v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto enc_vdev_register_rollback; } strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME, sizeof(jpeg->vfd_decoder->name)); jpeg->vfd_decoder->fops = &s5p_jpeg_fops; jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops; jpeg->vfd_decoder->minor = -1; jpeg->vfd_decoder->release = video_device_release; jpeg->vfd_decoder->lock = &jpeg->lock; jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n"); goto dec_vdev_alloc_rollback; } video_set_drvdata(jpeg->vfd_decoder, jpeg); v4l2_info(&jpeg->v4l2_dev, "decoder device registered as /dev/video%d\n", jpeg->vfd_decoder->num); /* final statements & power management */ platform_set_drvdata(pdev, jpeg); pm_runtime_enable(&pdev->dev); v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n"); return 0; dec_vdev_alloc_rollback: video_device_release(jpeg->vfd_decoder); enc_vdev_register_rollback: video_unregister_device(jpeg->vfd_encoder); enc_vdev_alloc_rollback: video_device_release(jpeg->vfd_encoder); vb2_allocator_rollback: vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx); m2m_init_rollback: v4l2_m2m_release(jpeg->m2m_dev); device_register_rollback: v4l2_device_unregister(&jpeg->v4l2_dev); clk_get_rollback: clk_disable(jpeg->clk); clk_put(jpeg->clk); request_irq_rollback: free_irq(jpeg->irq, jpeg); ioremap_rollback: iounmap(jpeg->regs); mem_region_rollback: release_resource(jpeg->ioarea); 
release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea)); jpeg_alloc_rollback: kfree(jpeg); return ret; } static int s5p_jpeg_remove(struct platform_device *pdev) { struct s5p_jpeg *jpeg = platform_get_drvdata(pdev); pm_runtime_disable(jpeg->dev); video_unregister_device(jpeg->vfd_decoder); video_device_release(jpeg->vfd_decoder); video_unregister_device(jpeg->vfd_encoder); video_device_release(jpeg->vfd_encoder); vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx); v4l2_m2m_release(jpeg->m2m_dev); v4l2_device_unregister(&jpeg->v4l2_dev); clk_disable(jpeg->clk); clk_put(jpeg->clk); free_irq(jpeg->irq, jpeg); iounmap(jpeg->regs); release_resource(jpeg->ioarea); release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea)); kfree(jpeg); return 0; } static int s5p_jpeg_runtime_suspend(struct device *dev) { return 0; } static int s5p_jpeg_runtime_resume(struct device *dev) { struct s5p_jpeg *jpeg = dev_get_drvdata(dev); /* * JPEG IP allows storing two Huffman tables for each component * We fill table 0 for each component */ jpeg_set_hdctbl(jpeg->regs); jpeg_set_hdctblg(jpeg->regs); jpeg_set_hactbl(jpeg->regs); jpeg_set_hactblg(jpeg->regs); return 0; } static const struct dev_pm_ops s5p_jpeg_pm_ops = { .runtime_suspend = s5p_jpeg_runtime_suspend, .runtime_resume = s5p_jpeg_runtime_resume, }; static struct platform_driver s5p_jpeg_driver = { .probe = s5p_jpeg_probe, .remove = s5p_jpeg_remove, .driver = { .owner = THIS_MODULE, .name = S5P_JPEG_M2M_NAME, .pm = &s5p_jpeg_pm_ops, }, }; static int __init s5p_jpeg_register(void) { int ret; pr_info("S5P JPEG V4L2 Driver, (c) 2011 Samsung Electronics\n"); ret = platform_driver_register(&s5p_jpeg_driver); if (ret) pr_err("%s: failed to register jpeg driver\n", __func__); return ret; } static void __exit s5p_jpeg_unregister(void) { platform_driver_unregister(&s5p_jpeg_driver); } module_init(s5p_jpeg_register); module_exit(s5p_jpeg_unregister); MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>"); 
MODULE_DESCRIPTION("Samsung JPEG codec driver"); MODULE_LICENSE("GPL");
gpl-2.0
HTCKernels/One-SV-international-k2u
drivers/video/mb862xx/mb862xxfbdrv.c
5059
30791
/* * drivers/mb862xx/mb862xxfb.c * * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver * * (C) 2008 Anatolij Gustschin <agust@denx.de> * DENX Software Engineering * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #undef DEBUG #include <linux/fb.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #if defined(CONFIG_OF) #include <linux/of_platform.h> #endif #include "mb862xxfb.h" #include "mb862xx_reg.h" #define NR_PALETTE 256 #define MB862XX_MEM_SIZE 0x1000000 #define CORALP_MEM_SIZE 0x2000000 #define CARMINE_MEM_SIZE 0x8000000 #define DRV_NAME "mb862xxfb" #if defined(CONFIG_SOCRATES) static struct mb862xx_gc_mode socrates_gc_mode = { /* Mode for Prime View PM070WL4 TFT LCD Panel */ { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 }, /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */ 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63 }; #endif /* Helpers */ static inline int h_total(struct fb_var_screeninfo *var) { return var->xres + var->left_margin + var->right_margin + var->hsync_len; } static inline int v_total(struct fb_var_screeninfo *var) { return var->yres + var->upper_margin + var->lower_margin + var->vsync_len; } static inline int hsp(struct fb_var_screeninfo *var) { return var->xres + var->right_margin - 1; } static inline int vsp(struct fb_var_screeninfo *var) { return var->yres + var->lower_margin - 1; } static inline int d_pitch(struct fb_var_screeninfo *var) { return var->xres * var->bits_per_pixel / 8; } static inline unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int mb862xxfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { 
struct mb862xxfb_par *par = info->par; unsigned int val; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: if (regno < 16) { val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue, &info->var.blue); par->pseudo_palette[regno] = val; } break; case FB_VISUAL_PSEUDOCOLOR: if (regno < 256) { val = (red >> 8) << 16; val |= (green >> 8) << 8; val |= blue >> 8; outreg(disp, GC_L0PAL0 + (regno * 4), val); } break; default: return 1; /* unsupported type */ } return 0; } static int mb862xxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) { unsigned long tmp; if (fbi->dev) dev_dbg(fbi->dev, "%s\n", __func__); /* check if these values fit into the registers */ if (var->hsync_len > 255 || var->vsync_len > 255) return -EINVAL; if ((var->xres + var->right_margin) >= 4096) return -EINVAL; if ((var->yres + var->lower_margin) > 4096) return -EINVAL; if (h_total(var) > 4096 || v_total(var) > 4096) return -EINVAL; if (var->xres_virtual > 4096 || var->yres_virtual > 4096) return -EINVAL; if (var->bits_per_pixel <= 8) var->bits_per_pixel = 8; else if (var->bits_per_pixel <= 16) var->bits_per_pixel = 16; else if (var->bits_per_pixel <= 32) var->bits_per_pixel = 32; /* * can cope with 8,16 or 24/32bpp if resulting * pitch is divisible by 64 without remainder */ if (d_pitch(&fbi->var) % GC_L0M_L0W_UNIT) { int r; var->bits_per_pixel = 0; do { var->bits_per_pixel += 8; r = d_pitch(&fbi->var) % GC_L0M_L0W_UNIT; } while (r && var->bits_per_pixel <= 32); if (d_pitch(&fbi->var) % GC_L0M_L0W_UNIT) return -EINVAL; } /* line length is going to be 128 bit aligned */ tmp = (var->xres * var->bits_per_pixel) / 8; if ((tmp & 15) != 0) return -EINVAL; /* set r/g/b positions and validate bpp */ switch (var->bits_per_pixel) { case 8: var->red.length = var->bits_per_pixel; var->green.length = var->bits_per_pixel; var->blue.length = var->bits_per_pixel; var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; 
var->transp.length = 0; break; case 16: var->red.length = 5; var->green.length = 5; var->blue.length = 5; var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->transp.length = 0; break; case 24: case 32: var->transp.length = 8; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.offset = 24; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; break; default: return -EINVAL; } return 0; } /* * set display parameters */ static int mb862xxfb_set_par(struct fb_info *fbi) { struct mb862xxfb_par *par = fbi->par; unsigned long reg, sc; dev_dbg(par->dev, "%s\n", __func__); if (par->type == BT_CORALP) mb862xxfb_init_accel(fbi, fbi->var.xres); if (par->pre_init) return 0; /* disp off */ reg = inreg(disp, GC_DCM1); reg &= ~GC_DCM01_DEN; outreg(disp, GC_DCM1, reg); /* set display reference clock div. */ sc = par->refclk / (1000000 / fbi->var.pixclock) - 1; reg = inreg(disp, GC_DCM1); reg &= ~(GC_DCM01_CKS | GC_DCM01_RESV | GC_DCM01_SC); reg |= sc << 8; outreg(disp, GC_DCM1, reg); dev_dbg(par->dev, "SC 0x%lx\n", sc); /* disp dimension, format */ reg = pack(d_pitch(&fbi->var) / GC_L0M_L0W_UNIT, (fbi->var.yres - 1)); if (fbi->var.bits_per_pixel == 16) reg |= GC_L0M_L0C_16; outreg(disp, GC_L0M, reg); if (fbi->var.bits_per_pixel == 32) { reg = inreg(disp, GC_L0EM); outreg(disp, GC_L0EM, reg | GC_L0EM_L0EC_24); } outreg(disp, GC_WY_WX, 0); reg = pack(fbi->var.yres - 1, fbi->var.xres); outreg(disp, GC_WH_WW, reg); outreg(disp, GC_L0OA0, 0); outreg(disp, GC_L0DA0, 0); outreg(disp, GC_L0DY_L0DX, 0); outreg(disp, GC_L0WY_L0WX, 0); outreg(disp, GC_L0WH_L0WW, reg); /* both HW-cursors off */ reg = inreg(disp, GC_CPM_CUTC); reg &= ~(GC_CPM_CEN0 | GC_CPM_CEN1); outreg(disp, GC_CPM_CUTC, reg); /* timings */ reg = pack(fbi->var.xres - 1, fbi->var.xres - 1); outreg(disp, GC_HDB_HDP, reg); reg = pack((fbi->var.yres - 1), vsp(&fbi->var)); outreg(disp, GC_VDP_VSP, reg); reg = ((fbi->var.vsync_len - 1) << 24) | pack((fbi->var.hsync_len - 
1), hsp(&fbi->var)); outreg(disp, GC_VSW_HSW_HSP, reg); outreg(disp, GC_HTP, pack(h_total(&fbi->var) - 1, 0)); outreg(disp, GC_VTR, pack(v_total(&fbi->var) - 1, 0)); /* display on */ reg = inreg(disp, GC_DCM1); reg |= GC_DCM01_DEN | GC_DCM01_L0E; reg &= ~GC_DCM01_ESY; outreg(disp, GC_DCM1, reg); return 0; } static int mb862xxfb_pan(struct fb_var_screeninfo *var, struct fb_info *info) { struct mb862xxfb_par *par = info->par; unsigned long reg; reg = pack(var->yoffset, var->xoffset); outreg(disp, GC_L0WY_L0WX, reg); reg = pack(info->var.yres_virtual, info->var.xres_virtual); outreg(disp, GC_L0WH_L0WW, reg); return 0; } static int mb862xxfb_blank(int mode, struct fb_info *fbi) { struct mb862xxfb_par *par = fbi->par; unsigned long reg; dev_dbg(fbi->dev, "blank mode=%d\n", mode); switch (mode) { case FB_BLANK_POWERDOWN: reg = inreg(disp, GC_DCM1); reg &= ~GC_DCM01_DEN; outreg(disp, GC_DCM1, reg); break; case FB_BLANK_UNBLANK: reg = inreg(disp, GC_DCM1); reg |= GC_DCM01_DEN; outreg(disp, GC_DCM1, reg); break; case FB_BLANK_NORMAL: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: default: return 1; } return 0; } static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) { struct mb862xxfb_par *par = fbi->par; struct mb862xx_l1_cfg *l1_cfg = &par->l1_cfg; void __user *argp = (void __user *)arg; int *enable; u32 l1em = 0; switch (cmd) { case MB862XX_L1_GET_CFG: if (copy_to_user(argp, l1_cfg, sizeof(*l1_cfg))) return -EFAULT; break; case MB862XX_L1_SET_CFG: if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) return -EFAULT; if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { /* downscaling */ outreg(cap, GC_CAP_CSC, pack((l1_cfg->sh << 11) / l1_cfg->dh, (l1_cfg->sw << 11) / l1_cfg->dw)); l1em = inreg(disp, GC_L1EM); l1em &= ~GC_L1EM_DM; } else if ((l1_cfg->sw <= l1_cfg->dw) && (l1_cfg->sh <= l1_cfg->dh)) { /* upscaling */ outreg(cap, GC_CAP_CSC, pack((l1_cfg->sh << 11) / l1_cfg->dh, (l1_cfg->sw << 11) / l1_cfg->dw)); outreg(cap, 
GC_CAP_CMSS, pack(l1_cfg->sw >> 1, l1_cfg->sh)); outreg(cap, GC_CAP_CMDS, pack(l1_cfg->dw >> 1, l1_cfg->dh)); l1em = inreg(disp, GC_L1EM); l1em |= GC_L1EM_DM; } if (l1_cfg->mirror) { outreg(cap, GC_CAP_CBM, inreg(cap, GC_CAP_CBM) | GC_CBM_HRV); l1em |= l1_cfg->dw * 2 - 8; } else { outreg(cap, GC_CAP_CBM, inreg(cap, GC_CAP_CBM) & ~GC_CBM_HRV); l1em &= 0xffff0000; } outreg(disp, GC_L1EM, l1em); break; case MB862XX_L1_ENABLE: enable = (int *)arg; if (*enable) { outreg(disp, GC_L1DA, par->cap_buf); outreg(cap, GC_CAP_IMG_START, pack(l1_cfg->sy >> 1, l1_cfg->sx)); outreg(cap, GC_CAP_IMG_END, pack(l1_cfg->sh, l1_cfg->sw)); outreg(disp, GC_L1M, GC_L1M_16 | GC_L1M_YC | GC_L1M_CS | (par->l1_stride << 16)); outreg(disp, GC_L1WY_L1WX, pack(l1_cfg->dy, l1_cfg->dx)); outreg(disp, GC_L1WH_L1WW, pack(l1_cfg->dh - 1, l1_cfg->dw)); outreg(disp, GC_DLS, 1); outreg(cap, GC_CAP_VCM, GC_VCM_VIE | GC_VCM_CM | GC_VCM_VS_PAL); outreg(disp, GC_DCM1, inreg(disp, GC_DCM1) | GC_DCM1_DEN | GC_DCM1_L1E); } else { outreg(cap, GC_CAP_VCM, inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE); outreg(disp, GC_DCM1, inreg(disp, GC_DCM1) & ~GC_DCM1_L1E); } break; case MB862XX_L1_CAP_CTL: enable = (int *)arg; if (*enable) { outreg(cap, GC_CAP_VCM, inreg(cap, GC_CAP_VCM) | GC_VCM_VIE); } else { outreg(cap, GC_CAP_VCM, inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE); } break; default: return -EINVAL; } return 0; } /* framebuffer ops */ static struct fb_ops mb862xxfb_ops = { .owner = THIS_MODULE, .fb_check_var = mb862xxfb_check_var, .fb_set_par = mb862xxfb_set_par, .fb_setcolreg = mb862xxfb_setcolreg, .fb_blank = mb862xxfb_blank, .fb_pan_display = mb862xxfb_pan, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_ioctl = mb862xxfb_ioctl, }; /* initialize fb_info data */ static int mb862xxfb_init_fbinfo(struct fb_info *fbi) { struct mb862xxfb_par *par = fbi->par; struct mb862xx_gc_mode *mode = par->gc_mode; unsigned long reg; int stride; fbi->fbops = &mb862xxfb_ops; fbi->pseudo_palette = 
par->pseudo_palette; fbi->screen_base = par->fb_base; fbi->screen_size = par->mapped_vram; strcpy(fbi->fix.id, DRV_NAME); fbi->fix.smem_start = (unsigned long)par->fb_base_phys; fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys; fbi->fix.mmio_len = par->mmio_len; fbi->fix.accel = FB_ACCEL_NONE; fbi->fix.type = FB_TYPE_PACKED_PIXELS; fbi->fix.type_aux = 0; fbi->fix.xpanstep = 1; fbi->fix.ypanstep = 1; fbi->fix.ywrapstep = 0; reg = inreg(disp, GC_DCM1); if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E) { /* get the disp mode from active display cfg */ unsigned long sc = ((reg & GC_DCM01_SC) >> 8) + 1; unsigned long hsp, vsp, ht, vt; dev_dbg(par->dev, "using bootloader's disp. mode\n"); fbi->var.pixclock = (sc * 1000000) / par->refclk; fbi->var.xres = (inreg(disp, GC_HDB_HDP) & 0x0fff) + 1; reg = inreg(disp, GC_VDP_VSP); fbi->var.yres = ((reg >> 16) & 0x0fff) + 1; vsp = (reg & 0x0fff) + 1; fbi->var.xres_virtual = fbi->var.xres; fbi->var.yres_virtual = fbi->var.yres; reg = inreg(disp, GC_L0EM); if (reg & GC_L0EM_L0EC_24) { fbi->var.bits_per_pixel = 32; } else { reg = inreg(disp, GC_L0M); if (reg & GC_L0M_L0C_16) fbi->var.bits_per_pixel = 16; else fbi->var.bits_per_pixel = 8; } reg = inreg(disp, GC_VSW_HSW_HSP); fbi->var.hsync_len = ((reg & 0xff0000) >> 16) + 1; fbi->var.vsync_len = ((reg & 0x3f000000) >> 24) + 1; hsp = (reg & 0xffff) + 1; ht = ((inreg(disp, GC_HTP) & 0xfff0000) >> 16) + 1; fbi->var.right_margin = hsp - fbi->var.xres; fbi->var.left_margin = ht - hsp - fbi->var.hsync_len; vt = ((inreg(disp, GC_VTR) & 0xfff0000) >> 16) + 1; fbi->var.lower_margin = vsp - fbi->var.yres; fbi->var.upper_margin = vt - vsp - fbi->var.vsync_len; } else if (mode) { dev_dbg(par->dev, "using supplied mode\n"); fb_videomode_to_var(&fbi->var, (struct fb_videomode *)mode); fbi->var.bits_per_pixel = mode->def_bpp ? 
mode->def_bpp : 8; } else { int ret; ret = fb_find_mode(&fbi->var, fbi, "640x480-16@60", NULL, 0, NULL, 16); if (ret == 0 || ret == 4) { dev_err(par->dev, "failed to get initial mode\n"); return -EINVAL; } } fbi->var.xoffset = 0; fbi->var.yoffset = 0; fbi->var.grayscale = 0; fbi->var.nonstd = 0; fbi->var.height = -1; fbi->var.width = -1; fbi->var.accel_flags = 0; fbi->var.vmode = FB_VMODE_NONINTERLACED; fbi->var.activate = FB_ACTIVATE_NOW; fbi->flags = FBINFO_DEFAULT | #ifdef __BIG_ENDIAN FBINFO_FOREIGN_ENDIAN | #endif FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; /* check and possibly fix bpp */ if ((fbi->fbops->fb_check_var)(&fbi->var, fbi)) dev_err(par->dev, "check_var() failed on initial setup?\n"); fbi->fix.visual = fbi->var.bits_per_pixel == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; fbi->fix.line_length = (fbi->var.xres_virtual * fbi->var.bits_per_pixel) / 8; fbi->fix.smem_len = fbi->fix.line_length * fbi->var.yres_virtual; /* * reserve space for capture buffers and two cursors * at the end of vram: 720x576 * 2 * 2.2 + 64x64 * 16. */ par->cap_buf = par->mapped_vram - 0x1bd800 - 0x10000; par->cap_len = 0x1bd800; par->l1_cfg.sx = 0; par->l1_cfg.sy = 0; par->l1_cfg.sw = 720; par->l1_cfg.sh = 576; par->l1_cfg.dx = 0; par->l1_cfg.dy = 0; par->l1_cfg.dw = 720; par->l1_cfg.dh = 576; stride = par->l1_cfg.sw * (fbi->var.bits_per_pixel / 8); par->l1_stride = stride / 64 + ((stride % 64) ? 
1 : 0); outreg(cap, GC_CAP_CBM, GC_CBM_OO | GC_CBM_CBST | (par->l1_stride << 16)); outreg(cap, GC_CAP_CBOA, par->cap_buf); outreg(cap, GC_CAP_CBLA, par->cap_buf + par->cap_len); return 0; } /* * show some display controller and cursor registers */ static ssize_t mb862xxfb_show_dispregs(struct device *dev, struct device_attribute *attr, char *buf) { struct fb_info *fbi = dev_get_drvdata(dev); struct mb862xxfb_par *par = fbi->par; char *ptr = buf; unsigned int reg; for (reg = GC_DCM0; reg <= GC_L0DY_L0DX; reg += 4) ptr += sprintf(ptr, "%08x = %08x\n", reg, inreg(disp, reg)); for (reg = GC_CPM_CUTC; reg <= GC_CUY1_CUX1; reg += 4) ptr += sprintf(ptr, "%08x = %08x\n", reg, inreg(disp, reg)); for (reg = GC_DCM1; reg <= GC_L0WH_L0WW; reg += 4) ptr += sprintf(ptr, "%08x = %08x\n", reg, inreg(disp, reg)); for (reg = 0x400; reg <= 0x410; reg += 4) ptr += sprintf(ptr, "geo %08x = %08x\n", reg, inreg(geo, reg)); for (reg = 0x400; reg <= 0x410; reg += 4) ptr += sprintf(ptr, "draw %08x = %08x\n", reg, inreg(draw, reg)); for (reg = 0x440; reg <= 0x450; reg += 4) ptr += sprintf(ptr, "draw %08x = %08x\n", reg, inreg(draw, reg)); return ptr - buf; } static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL); irqreturn_t mb862xx_intr(int irq, void *dev_id) { struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id; unsigned long reg_ist, mask; if (!par) return IRQ_NONE; if (par->type == BT_CARMINE) { /* Get Interrupt Status */ reg_ist = inreg(ctrl, GC_CTRL_STATUS); mask = inreg(ctrl, GC_CTRL_INT_MASK); if (reg_ist == 0) return IRQ_HANDLED; reg_ist &= mask; if (reg_ist == 0) return IRQ_HANDLED; /* Clear interrupt status */ outreg(ctrl, 0x0, reg_ist); } else { /* Get status */ reg_ist = inreg(host, GC_IST); mask = inreg(host, GC_IMASK); reg_ist &= mask; if (reg_ist == 0) return IRQ_HANDLED; /* Clear status */ outreg(host, GC_IST, ~reg_ist); } return IRQ_HANDLED; } #if defined(CONFIG_FB_MB862XX_LIME) /* * GDC (Lime, Coral(B/Q), Mint, ...) 
on host bus */ static int mb862xx_gdc_init(struct mb862xxfb_par *par) { unsigned long ccf, mmr; unsigned long ver, rev; if (!par) return -ENODEV; #if defined(CONFIG_FB_PRE_INIT_FB) par->pre_init = 1; #endif par->host = par->mmio_base; par->i2c = par->mmio_base + MB862XX_I2C_BASE; par->disp = par->mmio_base + MB862XX_DISP_BASE; par->cap = par->mmio_base + MB862XX_CAP_BASE; par->draw = par->mmio_base + MB862XX_DRAW_BASE; par->geo = par->mmio_base + MB862XX_GEO_BASE; par->pio = par->mmio_base + MB862XX_PIO_BASE; par->refclk = GC_DISP_REFCLK_400; ver = inreg(host, GC_CID); rev = inreg(pio, GC_REVISION); if ((ver == 0x303) && (rev & 0xffffff00) == 0x20050100) { dev_info(par->dev, "Fujitsu Lime v1.%d found\n", (int)rev & 0xff); par->type = BT_LIME; ccf = par->gc_mode ? par->gc_mode->ccf : GC_CCF_COT_100; mmr = par->gc_mode ? par->gc_mode->mmr : 0x414fb7f2; } else { dev_info(par->dev, "? GDC, CID/Rev.: 0x%lx/0x%lx \n", ver, rev); return -ENODEV; } if (!par->pre_init) { outreg(host, GC_CCF, ccf); udelay(200); outreg(host, GC_MMR, mmr); udelay(10); } /* interrupt status */ outreg(host, GC_IST, 0); outreg(host, GC_IMASK, GC_INT_EN); return 0; } static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct device *dev = &ofdev->dev; struct mb862xxfb_par *par; struct fb_info *info; struct resource res; resource_size_t res_size; unsigned long ret = -ENODEV; if (of_address_to_resource(np, 0, &res)) { dev_err(dev, "Invalid address\n"); return -ENXIO; } info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev); if (info == NULL) { dev_err(dev, "cannot allocate framebuffer\n"); return -ENOMEM; } par = info->par; par->info = info; par->dev = dev; par->irq = irq_of_parse_and_map(np, 0); if (par->irq == NO_IRQ) { dev_err(dev, "failed to map irq\n"); ret = -ENODEV; goto fbrel; } res_size = resource_size(&res); par->res = request_mem_region(res.start, res_size, DRV_NAME); if (par->res == NULL) { dev_err(dev, "Cannot 
claim framebuffer/mmio\n"); ret = -ENXIO; goto irqdisp; } #if defined(CONFIG_SOCRATES) par->gc_mode = &socrates_gc_mode; #endif par->fb_base_phys = res.start; par->mmio_base_phys = res.start + MB862XX_MMIO_BASE; par->mmio_len = MB862XX_MMIO_SIZE; if (par->gc_mode) par->mapped_vram = par->gc_mode->max_vram; else par->mapped_vram = MB862XX_MEM_SIZE; par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram); if (par->fb_base == NULL) { dev_err(dev, "Cannot map framebuffer\n"); goto rel_reg; } par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len); if (par->mmio_base == NULL) { dev_err(dev, "Cannot map registers\n"); goto fb_unmap; } dev_dbg(dev, "fb phys 0x%llx 0x%lx\n", (u64)par->fb_base_phys, (ulong)par->mapped_vram); dev_dbg(dev, "mmio phys 0x%llx 0x%lx, (irq = %d)\n", (u64)par->mmio_base_phys, (ulong)par->mmio_len, par->irq); if (mb862xx_gdc_init(par)) goto io_unmap; if (request_irq(par->irq, mb862xx_intr, 0, DRV_NAME, (void *)par)) { dev_err(dev, "Cannot request irq\n"); goto io_unmap; } mb862xxfb_init_fbinfo(info); if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) { dev_err(dev, "Could not allocate cmap for fb_info.\n"); goto free_irq; } if ((info->fbops->fb_set_par)(info)) dev_err(dev, "set_var() failed on initial setup?\n"); if (register_framebuffer(info)) { dev_err(dev, "failed to register framebuffer\n"); goto rel_cmap; } dev_set_drvdata(dev, info); if (device_create_file(dev, &dev_attr_dispregs)) dev_err(dev, "Can't create sysfs regdump file\n"); return 0; rel_cmap: fb_dealloc_cmap(&info->cmap); free_irq: outreg(host, GC_IMASK, 0); free_irq(par->irq, (void *)par); io_unmap: iounmap(par->mmio_base); fb_unmap: iounmap(par->fb_base); rel_reg: release_mem_region(res.start, res_size); irqdisp: irq_dispose_mapping(par->irq); fbrel: dev_set_drvdata(dev, NULL); framebuffer_release(info); return ret; } static int __devexit of_platform_mb862xx_remove(struct platform_device *ofdev) { struct fb_info *fbi = dev_get_drvdata(&ofdev->dev); struct mb862xxfb_par 
*par = fbi->par; resource_size_t res_size = resource_size(par->res); unsigned long reg; dev_dbg(fbi->dev, "%s release\n", fbi->fix.id); /* display off */ reg = inreg(disp, GC_DCM1); reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E); outreg(disp, GC_DCM1, reg); /* disable interrupts */ outreg(host, GC_IMASK, 0); free_irq(par->irq, (void *)par); irq_dispose_mapping(par->irq); device_remove_file(&ofdev->dev, &dev_attr_dispregs); unregister_framebuffer(fbi); fb_dealloc_cmap(&fbi->cmap); iounmap(par->mmio_base); iounmap(par->fb_base); dev_set_drvdata(&ofdev->dev, NULL); release_mem_region(par->res->start, res_size); framebuffer_release(fbi); return 0; } /* * common types */ static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = { { .compatible = "fujitsu,MB86276", }, { .compatible = "fujitsu,lime", }, { .compatible = "fujitsu,MB86277", }, { .compatible = "fujitsu,mint", }, { .compatible = "fujitsu,MB86293", }, { .compatible = "fujitsu,MB86294", }, { .compatible = "fujitsu,coral", }, { /* end */ } }; static struct platform_driver of_platform_mb862xxfb_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = of_platform_mb862xx_tbl, }, .probe = of_platform_mb862xx_probe, .remove = __devexit_p(of_platform_mb862xx_remove), }; #endif #if defined(CONFIG_FB_MB862XX_PCI_GDC) static int coralp_init(struct mb862xxfb_par *par) { int cn, ver; par->host = par->mmio_base; par->i2c = par->mmio_base + MB862XX_I2C_BASE; par->disp = par->mmio_base + MB862XX_DISP_BASE; par->cap = par->mmio_base + MB862XX_CAP_BASE; par->draw = par->mmio_base + MB862XX_DRAW_BASE; par->geo = par->mmio_base + MB862XX_GEO_BASE; par->pio = par->mmio_base + MB862XX_PIO_BASE; par->refclk = GC_DISP_REFCLK_400; if (par->mapped_vram >= 0x2000000) { /* relocate gdc registers space */ writel(1, par->fb_base + MB862XX_MMIO_BASE + GC_RSW); udelay(1); /* wait at least 20 bus cycles */ } ver = inreg(host, GC_CID); cn = (ver & GC_CID_CNAME_MSK) >> 8; ver = ver & GC_CID_VERSION_MSK; if (cn == 3) { 
unsigned long reg; dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\ (ver == 6) ? "P" : (ver == 8) ? "PA" : "?", par->pdev->revision); reg = inreg(disp, GC_DCM1); if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E) par->pre_init = 1; if (!par->pre_init) { outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133); udelay(200); outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL); udelay(10); } /* Clear interrupt status */ outreg(host, GC_IST, 0); } else { return -ENODEV; } mb862xx_i2c_init(par); return 0; } static int init_dram_ctrl(struct mb862xxfb_par *par) { unsigned long i = 0; /* * Set io mode first! Spec. says IC may be destroyed * if not set to SSTL2/LVCMOS before init. */ outreg(dram_ctrl, GC_DCTL_IOCONT1_IOCONT0, GC_EVB_DCTL_IOCONT1_IOCONT0); /* DRAM init */ outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD); outreg(dram_ctrl, GC_DCTL_SETTIME1_EMODE, GC_EVB_DCTL_SETTIME1_EMODE); outreg(dram_ctrl, GC_DCTL_REFRESH_SETTIME2, GC_EVB_DCTL_REFRESH_SETTIME2); outreg(dram_ctrl, GC_DCTL_RSV2_RSV1, GC_EVB_DCTL_RSV2_RSV1); outreg(dram_ctrl, GC_DCTL_DDRIF2_DDRIF1, GC_EVB_DCTL_DDRIF2_DDRIF1); outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES); /* DLL reset done? 
*/ while ((inreg(dram_ctrl, GC_DCTL_RSV0_STATES) & GC_DCTL_STATES_MSK)) { udelay(GC_DCTL_INIT_WAIT_INTERVAL); if (i++ > GC_DCTL_INIT_WAIT_CNT) { dev_err(par->dev, "VRAM init failed.\n"); return -EINVAL; } } outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD_AFT_RST); outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES_AFT_RST); return 0; } static int carmine_init(struct mb862xxfb_par *par) { unsigned long reg; par->ctrl = par->mmio_base + MB86297_CTRL_BASE; par->i2c = par->mmio_base + MB86297_I2C_BASE; par->disp = par->mmio_base + MB86297_DISP0_BASE; par->disp1 = par->mmio_base + MB86297_DISP1_BASE; par->cap = par->mmio_base + MB86297_CAP0_BASE; par->cap1 = par->mmio_base + MB86297_CAP1_BASE; par->draw = par->mmio_base + MB86297_DRAW_BASE; par->dram_ctrl = par->mmio_base + MB86297_DRAMCTRL_BASE; par->wrback = par->mmio_base + MB86297_WRBACK_BASE; par->refclk = GC_DISP_REFCLK_533; /* warm up */ reg = GC_CTRL_CLK_EN_DRAM | GC_CTRL_CLK_EN_2D3D | GC_CTRL_CLK_EN_DISP0; outreg(ctrl, GC_CTRL_CLK_ENABLE, reg); /* check for engine module revision */ if (inreg(draw, GC_2D3D_REV) == GC_RE_REVISION) dev_info(par->dev, "Fujitsu Carmine GDC Rev.%d found\n", par->pdev->revision); else goto err_init; reg &= ~GC_CTRL_CLK_EN_2D3D; outreg(ctrl, GC_CTRL_CLK_ENABLE, reg); /* set up vram */ if (init_dram_ctrl(par) < 0) goto err_init; outreg(ctrl, GC_CTRL_INT_MASK, 0); return 0; err_init: outreg(ctrl, GC_CTRL_CLK_ENABLE, 0); return -EINVAL; } static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par) { switch (par->type) { case BT_CORALP: return coralp_init(par); case BT_CARMINE: return carmine_init(par); default: return -ENODEV; } } #define CHIP_ID(id) \ { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) } static struct pci_device_id mb862xx_pci_tbl[] __devinitdata = { /* MB86295/MB86296 */ CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP), CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA), /* MB86297 */ CHIP_ID(PCI_DEVICE_ID_FUJITSU_CARMINE), { 0, } }; MODULE_DEVICE_TABLE(pci, 
mb862xx_pci_tbl); static int __devinit mb862xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct mb862xxfb_par *par; struct fb_info *info; struct device *dev = &pdev->dev; int ret; ret = pci_enable_device(pdev); if (ret < 0) { dev_err(dev, "Cannot enable PCI device\n"); goto out; } info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev); if (!info) { dev_err(dev, "framebuffer alloc failed\n"); ret = -ENOMEM; goto dis_dev; } par = info->par; par->info = info; par->dev = dev; par->pdev = pdev; par->irq = pdev->irq; ret = pci_request_regions(pdev, DRV_NAME); if (ret < 0) { dev_err(dev, "Cannot reserve region(s) for PCI device\n"); goto rel_fb; } switch (pdev->device) { case PCI_DEVICE_ID_FUJITSU_CORALP: case PCI_DEVICE_ID_FUJITSU_CORALPA: par->fb_base_phys = pci_resource_start(par->pdev, 0); par->mapped_vram = CORALP_MEM_SIZE; if (par->mapped_vram >= 0x2000000) { par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_HIGH_BASE; } else { par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE; } par->mmio_len = MB862XX_MMIO_SIZE; par->type = BT_CORALP; break; case PCI_DEVICE_ID_FUJITSU_CARMINE: par->fb_base_phys = pci_resource_start(par->pdev, 2); par->mmio_base_phys = pci_resource_start(par->pdev, 3); par->mmio_len = pci_resource_len(par->pdev, 3); par->mapped_vram = CARMINE_MEM_SIZE; par->type = BT_CARMINE; break; default: /* should never occur */ goto rel_reg; } par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram); if (par->fb_base == NULL) { dev_err(dev, "Cannot map framebuffer\n"); goto rel_reg; } par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len); if (par->mmio_base == NULL) { dev_err(dev, "Cannot map registers\n"); ret = -EIO; goto fb_unmap; } dev_dbg(dev, "fb phys 0x%llx 0x%lx\n", (unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram); dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n", (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len); if (mb862xx_pci_gdc_init(par)) goto io_unmap; if 
(request_irq(par->irq, mb862xx_intr, IRQF_SHARED, DRV_NAME, (void *)par)) { dev_err(dev, "Cannot request irq\n"); goto io_unmap; } mb862xxfb_init_fbinfo(info); if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) { dev_err(dev, "Could not allocate cmap for fb_info.\n"); ret = -ENOMEM; goto free_irq; } if ((info->fbops->fb_set_par)(info)) dev_err(dev, "set_var() failed on initial setup?\n"); ret = register_framebuffer(info); if (ret < 0) { dev_err(dev, "failed to register framebuffer\n"); goto rel_cmap; } pci_set_drvdata(pdev, info); if (device_create_file(dev, &dev_attr_dispregs)) dev_err(dev, "Can't create sysfs regdump file\n"); if (par->type == BT_CARMINE) outreg(ctrl, GC_CTRL_INT_MASK, GC_CARMINE_INT_EN); else outreg(host, GC_IMASK, GC_INT_EN); return 0; rel_cmap: fb_dealloc_cmap(&info->cmap); free_irq: free_irq(par->irq, (void *)par); io_unmap: iounmap(par->mmio_base); fb_unmap: iounmap(par->fb_base); rel_reg: pci_release_regions(pdev); rel_fb: framebuffer_release(info); dis_dev: pci_disable_device(pdev); out: return ret; } static void __devexit mb862xx_pci_remove(struct pci_dev *pdev) { struct fb_info *fbi = pci_get_drvdata(pdev); struct mb862xxfb_par *par = fbi->par; unsigned long reg; dev_dbg(fbi->dev, "%s release\n", fbi->fix.id); /* display off */ reg = inreg(disp, GC_DCM1); reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E); outreg(disp, GC_DCM1, reg); if (par->type == BT_CARMINE) { outreg(ctrl, GC_CTRL_INT_MASK, 0); outreg(ctrl, GC_CTRL_CLK_ENABLE, 0); } else { outreg(host, GC_IMASK, 0); } mb862xx_i2c_exit(par); device_remove_file(&pdev->dev, &dev_attr_dispregs); pci_set_drvdata(pdev, NULL); unregister_framebuffer(fbi); fb_dealloc_cmap(&fbi->cmap); free_irq(par->irq, (void *)par); iounmap(par->mmio_base); iounmap(par->fb_base); pci_release_regions(pdev); framebuffer_release(fbi); pci_disable_device(pdev); } static struct pci_driver mb862xxfb_pci_driver = { .name = DRV_NAME, .id_table = mb862xx_pci_tbl, .probe = mb862xx_pci_probe, .remove = 
__devexit_p(mb862xx_pci_remove), }; #endif static int __devinit mb862xxfb_init(void) { int ret = -ENODEV; #if defined(CONFIG_FB_MB862XX_LIME) ret = platform_driver_register(&of_platform_mb862xxfb_driver); #endif #if defined(CONFIG_FB_MB862XX_PCI_GDC) ret = pci_register_driver(&mb862xxfb_pci_driver); #endif return ret; } static void __exit mb862xxfb_exit(void) { #if defined(CONFIG_FB_MB862XX_LIME) platform_driver_unregister(&of_platform_mb862xxfb_driver); #endif #if defined(CONFIG_FB_MB862XX_PCI_GDC) pci_unregister_driver(&mb862xxfb_pci_driver); #endif } module_init(mb862xxfb_init); module_exit(mb862xxfb_exit); MODULE_DESCRIPTION("Fujitsu MB862xx Framebuffer driver"); MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
pacerom/kernel_oneplus_msm8974
drivers/gpio/gpio-mxc.c
5059
12598
/* * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> * Copyright 2008 Juergen Beisert, kernel@pengutronix.de * * Based on code from Freescale, * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/basic_mmio_gpio.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/module.h> #include <asm-generic/bug.h> #include <asm/mach/irq.h> #define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) enum mxc_gpio_hwtype { IMX1_GPIO, /* runs on i.mx1 */ IMX21_GPIO, /* runs on i.mx21 and i.mx27 */ IMX31_GPIO, /* runs on all other i.mx */ }; /* device type dependent stuff */ struct mxc_gpio_hwdata { unsigned dr_reg; unsigned gdir_reg; unsigned psr_reg; unsigned icr1_reg; unsigned icr2_reg; unsigned imr_reg; unsigned isr_reg; unsigned low_level; unsigned high_level; unsigned rise_edge; unsigned fall_edge; }; struct mxc_gpio_port { struct list_head node; void __iomem *base; int irq; int irq_high; int virtual_irq_start; struct bgpio_chip bgc; u32 both_edges; }; static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = { .dr_reg = 0x1c, .gdir_reg = 0x00, 
.psr_reg = 0x24, .icr1_reg = 0x28, .icr2_reg = 0x2c, .imr_reg = 0x30, .isr_reg = 0x34, .low_level = 0x03, .high_level = 0x02, .rise_edge = 0x00, .fall_edge = 0x01, }; static struct mxc_gpio_hwdata imx31_gpio_hwdata = { .dr_reg = 0x00, .gdir_reg = 0x04, .psr_reg = 0x08, .icr1_reg = 0x0c, .icr2_reg = 0x10, .imr_reg = 0x14, .isr_reg = 0x18, .low_level = 0x00, .high_level = 0x01, .rise_edge = 0x02, .fall_edge = 0x03, }; static enum mxc_gpio_hwtype mxc_gpio_hwtype; static struct mxc_gpio_hwdata *mxc_gpio_hwdata; #define GPIO_DR (mxc_gpio_hwdata->dr_reg) #define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg) #define GPIO_PSR (mxc_gpio_hwdata->psr_reg) #define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg) #define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg) #define GPIO_IMR (mxc_gpio_hwdata->imr_reg) #define GPIO_ISR (mxc_gpio_hwdata->isr_reg) #define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level) #define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level) #define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge) #define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge) #define GPIO_INT_NONE 0x4 static struct platform_device_id mxc_gpio_devtype[] = { { .name = "imx1-gpio", .driver_data = IMX1_GPIO, }, { .name = "imx21-gpio", .driver_data = IMX21_GPIO, }, { .name = "imx31-gpio", .driver_data = IMX31_GPIO, }, { /* sentinel */ } }; static const struct of_device_id mxc_gpio_dt_ids[] = { { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], }, { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], }, { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], }, { /* sentinel */ } }; /* * MX2 has one interrupt *for all* gpio ports. The list is used * to save the references to all ports, so that mx2_gpio_irq_handler * can walk through all interrupt status registers. 
*/ static LIST_HEAD(mxc_gpio_ports); /* Note: This driver assumes 32 GPIOs are handled in one register */ static int gpio_set_irq_type(struct irq_data *d, u32 type) { u32 gpio = irq_to_gpio(d->irq); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mxc_gpio_port *port = gc->private; u32 bit, val; int edge; void __iomem *reg = port->base; port->both_edges &= ~(1 << (gpio & 31)); switch (type) { case IRQ_TYPE_EDGE_RISING: edge = GPIO_INT_RISE_EDGE; break; case IRQ_TYPE_EDGE_FALLING: edge = GPIO_INT_FALL_EDGE; break; case IRQ_TYPE_EDGE_BOTH: val = gpio_get_value(gpio); if (val) { edge = GPIO_INT_LOW_LEV; pr_debug("mxc: set GPIO %d to low trigger\n", gpio); } else { edge = GPIO_INT_HIGH_LEV; pr_debug("mxc: set GPIO %d to high trigger\n", gpio); } port->both_edges |= 1 << (gpio & 31); break; case IRQ_TYPE_LEVEL_LOW: edge = GPIO_INT_LOW_LEV; break; case IRQ_TYPE_LEVEL_HIGH: edge = GPIO_INT_HIGH_LEV; break; default: return -EINVAL; } reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ bit = gpio & 0xf; val = readl(reg) & ~(0x3 << (bit << 1)); writel(val | (edge << (bit << 1)), reg); writel(1 << (gpio & 0x1f), port->base + GPIO_ISR); return 0; } static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) { void __iomem *reg = port->base; u32 bit, val; int edge; reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ bit = gpio & 0xf; val = readl(reg); edge = (val >> (bit << 1)) & 3; val &= ~(0x3 << (bit << 1)); if (edge == GPIO_INT_HIGH_LEV) { edge = GPIO_INT_LOW_LEV; pr_debug("mxc: switch GPIO %d to low trigger\n", gpio); } else if (edge == GPIO_INT_LOW_LEV) { edge = GPIO_INT_HIGH_LEV; pr_debug("mxc: switch GPIO %d to high trigger\n", gpio); } else { pr_err("mxc: invalid configuration for GPIO %d: %x\n", gpio, edge); return; } writel(val | (edge << (bit << 1)), reg); } /* handle 32 interrupts in one status register */ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) { u32 gpio_irq_no_base = 
port->virtual_irq_start; while (irq_stat != 0) { int irqoffset = fls(irq_stat) - 1; if (port->both_edges & (1 << irqoffset)) mxc_flip_edge(port, irqoffset); generic_handle_irq(gpio_irq_no_base + irqoffset); irq_stat &= ~(1 << irqoffset); } } /* MX1 and MX3 has one interrupt *per* gpio port */ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) { u32 irq_stat; struct mxc_gpio_port *port = irq_get_handler_data(irq); struct irq_chip *chip = irq_get_chip(irq); chained_irq_enter(chip, desc); irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR); mxc_gpio_irq_handler(port, irq_stat); chained_irq_exit(chip, desc); } /* MX2 has one interrupt *for all* gpio ports */ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) { u32 irq_msk, irq_stat; struct mxc_gpio_port *port; /* walk through all interrupt status registers */ list_for_each_entry(port, &mxc_gpio_ports, node) { irq_msk = readl(port->base + GPIO_IMR); if (!irq_msk) continue; irq_stat = readl(port->base + GPIO_ISR) & irq_msk; if (irq_stat) mxc_gpio_irq_handler(port, irq_stat); } } /* * Set interrupt number "irq" in the GPIO as a wake-up source. * While system is running, all registered GPIO interrupts need to have * wake-up enabled. When system is suspended, only selected GPIO interrupts * need to have wake-up enabled. * @param irq interrupt source number * @param enable enable as wake-up if equal to non-zero * @return This function returns 0 on success. 
*/ static int gpio_set_wake_irq(struct irq_data *d, u32 enable) { u32 gpio = irq_to_gpio(d->irq); u32 gpio_idx = gpio & 0x1F; struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct mxc_gpio_port *port = gc->private; if (enable) { if (port->irq_high && (gpio_idx >= 16)) enable_irq_wake(port->irq_high); else enable_irq_wake(port->irq); } else { if (port->irq_high && (gpio_idx >= 16)) disable_irq_wake(port->irq_high); else disable_irq_wake(port->irq); } return 0; } static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start, port->base, handle_level_irq); gc->private = port; ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_set_irq_type; ct->chip.irq_set_wake = gpio_set_wake_irq; ct->regs.ack = GPIO_ISR; ct->regs.mask = GPIO_IMR; irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); } static void __devinit mxc_gpio_get_hw(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(mxc_gpio_dt_ids, &pdev->dev); enum mxc_gpio_hwtype hwtype; if (of_id) pdev->id_entry = of_id->data; hwtype = pdev->id_entry->driver_data; if (mxc_gpio_hwtype) { /* * The driver works with a reasonable presupposition, * that is all gpio ports must be the same type when * running on one soc. 
*/ BUG_ON(mxc_gpio_hwtype != hwtype); return; } if (hwtype == IMX31_GPIO) mxc_gpio_hwdata = &imx31_gpio_hwdata; else mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata; mxc_gpio_hwtype = hwtype; } static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct bgpio_chip *bgc = to_bgpio_chip(gc); struct mxc_gpio_port *port = container_of(bgc, struct mxc_gpio_port, bgc); return port->virtual_irq_start + offset; } static int __devinit mxc_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mxc_gpio_port *port; struct resource *iores; int err; mxc_gpio_get_hw(pdev); port = kzalloc(sizeof(struct mxc_gpio_port), GFP_KERNEL); if (!port) return -ENOMEM; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iores) { err = -ENODEV; goto out_kfree; } if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { err = -EBUSY; goto out_kfree; } port->base = ioremap(iores->start, resource_size(iores)); if (!port->base) { err = -ENOMEM; goto out_release_mem; } port->irq_high = platform_get_irq(pdev, 1); port->irq = platform_get_irq(pdev, 0); if (port->irq < 0) { err = -EINVAL; goto out_iounmap; } /* disable the interrupt and clear the status */ writel(0, port->base + GPIO_IMR); writel(~0, port->base + GPIO_ISR); if (mxc_gpio_hwtype == IMX21_GPIO) { /* setup one handler for all GPIO interrupts */ if (pdev->id == 0) irq_set_chained_handler(port->irq, mx2_gpio_irq_handler); } else { /* setup one handler for each entry */ irq_set_chained_handler(port->irq, mx3_gpio_irq_handler); irq_set_handler_data(port->irq, port); if (port->irq_high > 0) { /* setup handler for GPIO 16 to 31 */ irq_set_chained_handler(port->irq_high, mx3_gpio_irq_handler); irq_set_handler_data(port->irq_high, port); } } err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + GPIO_PSR, port->base + GPIO_DR, NULL, port->base + GPIO_GDIR, NULL, false); if (err) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; port->bgc.gc.base = pdev->id * 32; 
port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); err = gpiochip_add(&port->bgc.gc); if (err) goto out_bgpio_remove; /* * In dt case, we use gpio number range dynamically * allocated by gpio core. */ port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base : pdev->id * 32); /* gpio-mxc can be a generic irq chip */ mxc_gpio_init_gc(port); list_add_tail(&port->node, &mxc_gpio_ports); return 0; out_bgpio_remove: bgpio_remove(&port->bgc); out_iounmap: iounmap(port->base); out_release_mem: release_mem_region(iores->start, resource_size(iores)); out_kfree: kfree(port); dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err); return err; } static struct platform_driver mxc_gpio_driver = { .driver = { .name = "gpio-mxc", .owner = THIS_MODULE, .of_match_table = mxc_gpio_dt_ids, }, .probe = mxc_gpio_probe, .id_table = mxc_gpio_devtype, }; static int __init gpio_mxc_init(void) { return platform_driver_register(&mxc_gpio_driver); } postcore_initcall(gpio_mxc_init); MODULE_AUTHOR("Freescale Semiconductor, " "Daniel Mack <danielncaiaq.de>, " "Juergen Beisert <kernel@pengutronix.de>"); MODULE_DESCRIPTION("Freescale MXC GPIO"); MODULE_LICENSE("GPL");
gpl-2.0
trunghieuhust/android_sony_kernel_msm8260A
arch/powerpc/platforms/embedded6xx/storcenter.c
7363
2976
/* * Board setup routines for the storcenter * * Copyright 2007 (C) Oyvind Repvik (nail@nslu2-linux.org) * Copyright 2007 Andy Wilcox, Jon Loeliger * * Based on linkstation.c by G. Liakhovetski * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of * any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/initrd.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/mpic.h> #include <asm/pci-bridge.h> #include "mpc10x.h" static __initdata struct of_device_id storcenter_of_bus[] = { { .name = "soc", }, {}, }; static int __init storcenter_device_probe(void) { of_platform_bus_probe(NULL, storcenter_of_bus, NULL); return 0; } machine_device_initcall(storcenter, storcenter_device_probe); static int __init storcenter_add_bridge(struct device_node *dev) { #ifdef CONFIG_PCI int len; struct pci_controller *hose; const int *bus_range; printk("Adding PCI host bridge %s\n", dev->full_name); hose = pcibios_alloc_controller(dev); if (hose == NULL) return -ENOMEM; bus_range = of_get_property(dev, "bus-range", &len); hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, MPC10X_MAPB_CNFG_ADDR, MPC10X_MAPB_CNFG_DATA, 0); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, 1); #endif return 0; } static void __init storcenter_setup_arch(void) { struct device_node *np; /* Lookup PCI host bridges */ for_each_compatible_node(np, "pci", "mpc10x-pci") storcenter_add_bridge(np); printk(KERN_INFO "IOMEGA StorCenter\n"); } /* * Interrupt setup and service. Interrupts on the turbostation come * from the four PCI slots plus onboard 8241 devices: I2C, DUART. 
*/ static void __init storcenter_init_IRQ(void) { struct mpic *mpic; mpic = mpic_alloc(NULL, 0, 0, 16, 0, " OpenPIC "); BUG_ON(mpic == NULL); /* * 16 Serial Interrupts followed by 16 Internal Interrupts. * I2C is the second internal, so it is at 17, 0x11020. */ mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200); mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000); mpic_init(mpic); } static void storcenter_restart(char *cmd) { local_irq_disable(); /* Set exception prefix high - to the firmware */ _nmask_and_or_msr(0, MSR_IP); /* Wait for reset to happen */ for (;;) ; } static int __init storcenter_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "iomega,storcenter"); } define_machine(storcenter){ .name = "IOMEGA StorCenter", .probe = storcenter_probe, .setup_arch = storcenter_setup_arch, .init_IRQ = storcenter_init_IRQ, .get_irq = mpic_get_irq, .restart = storcenter_restart, .calibrate_decr = generic_calibrate_decr, };
gpl-2.0
sxwzhw/iproj-su640
drivers/spi/spi-omap-100k.c
8387
15967
/* * OMAP7xx SPI 100k controller driver * Author: Fabrice Crohas <fcrohas@gmail.com> * from original omap1_mcspi driver * * Copyright (C) 2005, 2006 Nokia Corporation * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and * Juha Yrj�l� <juha.yrjola@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <plat/clock.h> #define OMAP1_SPI100K_MAX_FREQ 48000000 #define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) #define SPI_SETUP1 0x00 #define SPI_SETUP2 0x02 #define SPI_CTRL 0x04 #define SPI_STATUS 0x06 #define SPI_TX_LSB 0x08 #define SPI_TX_MSB 0x0a #define SPI_RX_LSB 0x0c #define SPI_RX_MSB 0x0e #define SPI_SETUP1_INT_READ_ENABLE (1UL << 5) #define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4) #define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1) #define SPI_SETUP1_CLOCK_ENABLE (1UL << 0) #define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0) #define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0) #define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5) #define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5) #define SPI_SETUP2_LEVEL_TRIGGER 
(0UL << 10) #define SPI_SETUP2_EDGE_TRIGGER (1UL << 10) #define SPI_CTRL_SEN(x) ((x) << 7) #define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2) #define SPI_CTRL_WR (1UL << 1) #define SPI_CTRL_RD (1UL << 0) #define SPI_STATUS_WE (1UL << 1) #define SPI_STATUS_RD (1UL << 0) #define WRITE 0 #define READ 1 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and * cache operations; better heuristics consider wordsize and bitrate. */ #define DMA_MIN_BYTES 8 #define SPI_RUNNING 0 #define SPI_SHUTDOWN 1 struct omap1_spi100k { struct work_struct work; /* lock protects queue and registers */ spinlock_t lock; struct list_head msg_queue; struct spi_master *master; struct clk *ick; struct clk *fck; /* Virtual base address of the controller */ void __iomem *base; /* State of the SPI */ unsigned int state; }; struct omap1_spi100k_cs { void __iomem *base; int word_len; }; static struct workqueue_struct *omap1_spi100k_wq; #define MOD_REG_BIT(val, mask, set) do { \ if (set) \ val |= mask; \ else \ val &= ~mask; \ } while (0) static void spi100k_enable_clock(struct spi_master *master) { unsigned int val; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* enable SPI */ val = readw(spi100k->base + SPI_SETUP1); val |= SPI_SETUP1_CLOCK_ENABLE; writew(val, spi100k->base + SPI_SETUP1); } static void spi100k_disable_clock(struct spi_master *master) { unsigned int val; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* disable SPI */ val = readw(spi100k->base + SPI_SETUP1); val &= ~SPI_SETUP1_CLOCK_ENABLE; writew(val, spi100k->base + SPI_SETUP1); } static void spi100k_write_data(struct spi_master *master, int len, int data) { struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* write 16-bit word, shifting 8-bit data if necessary */ if (len <= 8) { data <<= 8; len = 16; } spi100k_enable_clock(master); writew( data , spi100k->base + SPI_TX_MSB); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | SPI_CTRL_WR, spi100k->base + SPI_CTRL); /* 
Wait for bit ack send change */ while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE); udelay(1000); spi100k_disable_clock(master); } static int spi100k_read_data(struct spi_master *master, int len) { int dataH,dataL; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); /* Always do at least 16 bits */ if (len <= 8) len = 16; spi100k_enable_clock(master); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | SPI_CTRL_RD, spi100k->base + SPI_CTRL); while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD); udelay(1000); dataL = readw(spi100k->base + SPI_RX_LSB); dataH = readw(spi100k->base + SPI_RX_MSB); spi100k_disable_clock(master); return dataL; } static void spi100k_open(struct spi_master *master) { /* get control of SPI */ struct omap1_spi100k *spi100k = spi_master_get_devdata(master); writew(SPI_SETUP1_INT_READ_ENABLE | SPI_SETUP1_INT_WRITE_ENABLE | SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1); /* configure clock and interrupts */ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING | SPI_SETUP2_NEGATIVE_LEVEL | SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2); } static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable) { if (enable) writew(0x05fc, spi100k->base + SPI_CTRL); else writew(0x05fd, spi100k->base + SPI_CTRL); } static unsigned omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) { struct omap1_spi100k *spi100k; struct omap1_spi100k_cs *cs = spi->controller_state; unsigned int count, c; int word_len; spi100k = spi_master_get_devdata(spi->master); count = xfer->len; c = count; word_len = cs->word_len; if (word_len <= 8) { u8 *rx; const u8 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=1; if (xfer->tx_buf != NULL) spi100k_write_data(spi->master, word_len, *tx++); if (xfer->rx_buf != NULL) *rx++ = spi100k_read_data(spi->master, word_len); } while(c); } else if (word_len <= 16) { u16 *rx; const u16 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=2; if 
(xfer->tx_buf != NULL) spi100k_write_data(spi->master,word_len, *tx++); if (xfer->rx_buf != NULL) *rx++ = spi100k_read_data(spi->master,word_len); } while(c); } else if (word_len <= 32) { u32 *rx; const u32 *tx; rx = xfer->rx_buf; tx = xfer->tx_buf; do { c-=4; if (xfer->tx_buf != NULL) spi100k_write_data(spi->master,word_len, *tx); if (xfer->rx_buf != NULL) *rx = spi100k_read_data(spi->master,word_len); } while(c); } return count - c; } /* called only when no transfer is active to this device */ static int omap1_spi100k_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master); struct omap1_spi100k_cs *cs = spi->controller_state; u8 word_len = spi->bits_per_word; if (t != NULL && t->bits_per_word) word_len = t->bits_per_word; if (!word_len) word_len = 8; if (spi->bits_per_word > 32) return -EINVAL; cs->word_len = word_len; /* SPI init before transfer */ writew(0x3e , spi100k->base + SPI_SETUP1); writew(0x00 , spi100k->base + SPI_STATUS); writew(0x3e , spi100k->base + SPI_CTRL); return 0; } /* the spi->mode bits understood by this driver: */ #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) static int omap1_spi100k_setup(struct spi_device *spi) { int ret; struct omap1_spi100k *spi100k; struct omap1_spi100k_cs *cs = spi->controller_state; if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", spi->bits_per_word); return -EINVAL; } spi100k = spi_master_get_devdata(spi->master); if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; cs->base = spi100k->base + spi->chip_select * 0x14; spi->controller_state = cs; } spi100k_open(spi->master); clk_enable(spi100k->ick); clk_enable(spi100k->fck); ret = omap1_spi100k_setup_transfer(spi, NULL); clk_disable(spi100k->ick); clk_disable(spi100k->fck); return ret; } static void omap1_spi100k_work(struct work_struct *work) { struct omap1_spi100k *spi100k; int status = 0; spi100k = 
container_of(work, struct omap1_spi100k, work); spin_lock_irq(&spi100k->lock); clk_enable(spi100k->ick); clk_enable(spi100k->fck); /* We only enable one channel at a time -- the one whose message is * at the head of the queue -- although this controller would gladly * arbitrate among multiple channels. This corresponds to "single * channel" master mode. As a side effect, we need to manage the * chipselect with the FORCE bit ... CS != channel enable. */ while (!list_empty(&spi100k->msg_queue)) { struct spi_message *m; struct spi_device *spi; struct spi_transfer *t = NULL; int cs_active = 0; struct omap1_spi100k_cs *cs; int par_override = 0; m = container_of(spi100k->msg_queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock_irq(&spi100k->lock); spi = m->spi; cs = spi->controller_state; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { status = -EINVAL; break; } if (par_override || t->speed_hz || t->bits_per_word) { par_override = 1; status = omap1_spi100k_setup_transfer(spi, t); if (status < 0) break; if (!t->speed_hz && !t->bits_per_word) par_override = 0; } if (!cs_active) { omap1_spi100k_force_cs(spi100k, 1); cs_active = 1; } if (t->len) { unsigned count; count = omap1_spi100k_txrx_pio(spi, t); m->actual_length += count; if (count != t->len) { status = -EIO; break; } } if (t->delay_usecs) udelay(t->delay_usecs); /* ignore the "leave it on after last xfer" hint */ if (t->cs_change) { omap1_spi100k_force_cs(spi100k, 0); cs_active = 0; } } /* Restore defaults if they were overriden */ if (par_override) { par_override = 0; status = omap1_spi100k_setup_transfer(spi, NULL); } if (cs_active) omap1_spi100k_force_cs(spi100k, 0); m->status = status; m->complete(m->context); spin_lock_irq(&spi100k->lock); } clk_disable(spi100k->ick); clk_disable(spi100k->fck); spin_unlock_irq(&spi100k->lock); if (status < 0) printk(KERN_WARNING "spi transfer failed with %d\n", status); } static int 
omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) { struct omap1_spi100k *spi100k; unsigned long flags; struct spi_transfer *t; m->actual_length = 0; m->status = -EINPROGRESS; spi100k = spi_master_get_devdata(spi->master); /* Don't accept new work if we're shutting down */ if (spi100k->state == SPI_SHUTDOWN) return -ESHUTDOWN; /* reject invalid messages and transfers */ if (list_empty(&m->transfers) || !m->complete) return -EINVAL; list_for_each_entry(t, &m->transfers, transfer_list) { const void *tx_buf = t->tx_buf; void *rx_buf = t->rx_buf; unsigned len = t->len; if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ || (len && !(rx_buf || tx_buf)) || (t->bits_per_word && ( t->bits_per_word < 4 || t->bits_per_word > 32))) { dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", t->speed_hz, len, tx_buf ? "tx" : "", rx_buf ? "rx" : "", t->bits_per_word); return -EINVAL; } if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", t->speed_hz, OMAP1_SPI100K_MAX_FREQ/(1<<16)); return -EINVAL; } } spin_lock_irqsave(&spi100k->lock, flags); list_add_tail(&m->queue, &spi100k->msg_queue); queue_work(omap1_spi100k_wq, &spi100k->work); spin_unlock_irqrestore(&spi100k->lock, flags); return 0; } static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) { return 0; } static int __devinit omap1_spi100k_probe(struct platform_device *pdev) { struct spi_master *master; struct omap1_spi100k *spi100k; int status = 0; if (!pdev->id) return -EINVAL; master = spi_alloc_master(&pdev->dev, sizeof *spi100k); if (master == NULL) { dev_dbg(&pdev->dev, "master allocation failed\n"); return -ENOMEM; } if (pdev->id != -1) master->bus_num = pdev->id; master->setup = omap1_spi100k_setup; master->transfer = omap1_spi100k_transfer; master->cleanup = NULL; master->num_chipselect = 2; master->mode_bits = MODEBITS; dev_set_drvdata(&pdev->dev, master); spi100k = spi_master_get_devdata(master); spi100k->master = master; /* * The 
memory region base address is taken as the platform_data. * You should allocate this with ioremap() before initializing * the SPI. */ spi100k->base = (void __iomem *) pdev->dev.platform_data; INIT_WORK(&spi100k->work, omap1_spi100k_work); spin_lock_init(&spi100k->lock); INIT_LIST_HEAD(&spi100k->msg_queue); spi100k->ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(spi100k->ick)) { dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); status = PTR_ERR(spi100k->ick); goto err1; } spi100k->fck = clk_get(&pdev->dev, "fck"); if (IS_ERR(spi100k->fck)) { dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); status = PTR_ERR(spi100k->fck); goto err2; } if (omap1_spi100k_reset(spi100k) < 0) goto err3; status = spi_register_master(master); if (status < 0) goto err3; spi100k->state = SPI_RUNNING; return status; err3: clk_put(spi100k->fck); err2: clk_put(spi100k->ick); err1: spi_master_put(master); return status; } static int __exit omap1_spi100k_remove(struct platform_device *pdev) { struct spi_master *master; struct omap1_spi100k *spi100k; struct resource *r; unsigned limit = 500; unsigned long flags; int status = 0; master = dev_get_drvdata(&pdev->dev); spi100k = spi_master_get_devdata(master); spin_lock_irqsave(&spi100k->lock, flags); spi100k->state = SPI_SHUTDOWN; while (!list_empty(&spi100k->msg_queue) && limit--) { spin_unlock_irqrestore(&spi100k->lock, flags); msleep(10); spin_lock_irqsave(&spi100k->lock, flags); } if (!list_empty(&spi100k->msg_queue)) status = -EBUSY; spin_unlock_irqrestore(&spi100k->lock, flags); if (status != 0) return status; clk_put(spi100k->fck); clk_put(spi100k->ick); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi_unregister_master(master); return 0; } static struct platform_driver omap1_spi100k_driver = { .driver = { .name = "omap1_spi100k", .owner = THIS_MODULE, }, .remove = __exit_p(omap1_spi100k_remove), }; static int __init omap1_spi100k_init(void) { omap1_spi100k_wq = create_singlethread_workqueue( omap1_spi100k_driver.driver.name); if 
(omap1_spi100k_wq == NULL) return -1; return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); } static void __exit omap1_spi100k_exit(void) { platform_driver_unregister(&omap1_spi100k_driver); destroy_workqueue(omap1_spi100k_wq); } module_init(omap1_spi100k_init); module_exit(omap1_spi100k_exit); MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
lexmazter/dellstreak5-kernel-3.4
drivers/input/input-compat.c
8899
3372
/* * 32bit compatibility wrappers for the input subsystem. * * Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/export.h> #include <asm/uaccess.h> #include "input-compat.h" #ifdef CONFIG_COMPAT int input_event_from_user(const char __user *buffer, struct input_event *event) { if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; if (copy_from_user(&compat_event, buffer, sizeof(struct input_event_compat))) return -EFAULT; event->time.tv_sec = compat_event.time.tv_sec; event->time.tv_usec = compat_event.time.tv_usec; event->type = compat_event.type; event->code = compat_event.code; event->value = compat_event.value; } else { if (copy_from_user(event, buffer, sizeof(struct input_event))) return -EFAULT; } return 0; } int input_event_to_user(char __user *buffer, const struct input_event *event) { if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; compat_event.time.tv_sec = event->time.tv_sec; compat_event.time.tv_usec = event->time.tv_usec; compat_event.type = event->type; compat_event.code = event->code; compat_event.value = event->value; if (copy_to_user(buffer, &compat_event, sizeof(struct input_event_compat))) return -EFAULT; } else { if (copy_to_user(buffer, event, sizeof(struct input_event))) return -EFAULT; } return 0; } int input_ff_effect_from_user(const char __user *buffer, size_t size, struct ff_effect *effect) { if (INPUT_COMPAT_TEST) { struct ff_effect_compat *compat_effect; if (size != sizeof(struct ff_effect_compat)) return -EINVAL; /* * It so happens that the pointer which needs to be changed * is the last field in the structure, so we can retrieve the * whole thing and replace just the pointer. 
*/ compat_effect = (struct ff_effect_compat *)effect; if (copy_from_user(compat_effect, buffer, sizeof(struct ff_effect_compat))) return -EFAULT; if (compat_effect->type == FF_PERIODIC && compat_effect->u.periodic.waveform == FF_CUSTOM) effect->u.periodic.custom_data = compat_ptr(compat_effect->u.periodic.custom_data); } else { if (size != sizeof(struct ff_effect)) return -EINVAL; if (copy_from_user(effect, buffer, sizeof(struct ff_effect))) return -EFAULT; } return 0; } #else int input_event_from_user(const char __user *buffer, struct input_event *event) { if (copy_from_user(event, buffer, sizeof(struct input_event))) return -EFAULT; return 0; } int input_event_to_user(char __user *buffer, const struct input_event *event) { if (copy_to_user(buffer, event, sizeof(struct input_event))) return -EFAULT; return 0; } int input_ff_effect_from_user(const char __user *buffer, size_t size, struct ff_effect *effect) { if (size != sizeof(struct ff_effect)) return -EINVAL; if (copy_from_user(effect, buffer, sizeof(struct ff_effect))) return -EFAULT; return 0; } #endif /* CONFIG_COMPAT */ EXPORT_SYMBOL_GPL(input_event_from_user); EXPORT_SYMBOL_GPL(input_event_to_user); EXPORT_SYMBOL_GPL(input_ff_effect_from_user);
gpl-2.0
OliverG96/android_kernel_samsung_golden
crypto/cts.c
10179
10045
/* * CTS: Cipher Text Stealing mode * * COPYRIGHT (c) 2008 * The Regents of the University of Michigan * ALL RIGHTS RESERVED * * Permission is granted to use, copy, create derivative works * and redistribute this software and such derivative works * for any purpose, so long as the name of The University of * Michigan is not used in any advertising or publicity * pertaining to the use of distribution of this software * without specific, written prior authorization. If the * above copyright notice or any other identification of the * University of Michigan is included in any copy of any * portion of this software, then the disclaimer below must * also be included. * * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF * SUCH DAMAGES. */ /* Derived from various: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> */ /* * This is the Cipher Text Stealing mode as described by * Section 8 of rfc2040 and referenced by rfc3962. * rfc3962 includes errata information in its Appendix A. 
*/ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <crypto/scatterwalk.h> #include <linux/slab.h> struct crypto_cts_ctx { struct crypto_blkcipher *child; }; static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); struct crypto_blkcipher *child = ctx->child; int err; crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_blkcipher_setkey(child, key, keylen); crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int offset, unsigned int nbytes) { int bsize = crypto_blkcipher_blocksize(desc->tfm); u8 tmp[bsize], tmp2[bsize]; struct blkcipher_desc lcldesc; struct scatterlist sgsrc[1], sgdst[1]; int lastn = nbytes - bsize; u8 iv[bsize]; u8 s[bsize * 2], d[bsize * 2]; int err; if (lastn < 0) return -EINVAL; sg_init_table(sgsrc, 1); sg_init_table(sgdst, 1); memset(s, 0, sizeof(s)); scatterwalk_map_and_copy(s, src, offset, nbytes, 0); memcpy(iv, desc->info, bsize); lcldesc.tfm = ctx->child; lcldesc.info = iv; lcldesc.flags = desc->flags; sg_set_buf(&sgsrc[0], s, bsize); sg_set_buf(&sgdst[0], tmp, bsize); err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); memcpy(d + bsize, tmp, lastn); lcldesc.info = tmp; sg_set_buf(&sgsrc[0], s + bsize, bsize); sg_set_buf(&sgdst[0], tmp2, bsize); err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); memcpy(d, tmp2, bsize); scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); memcpy(desc->info, tmp2, bsize); return err; } static int crypto_cts_encrypt(struct blkcipher_desc *desc, struct scatterlist 
*dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int bsize = crypto_blkcipher_blocksize(desc->tfm); int tot_blocks = (nbytes + bsize - 1) / bsize; int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; struct blkcipher_desc lcldesc; int err; lcldesc.tfm = ctx->child; lcldesc.info = desc->info; lcldesc.flags = desc->flags; if (tot_blocks == 1) { err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); } else if (nbytes <= bsize * 2) { err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); } else { /* do normal function for tot_blocks - 2 */ err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, cbc_blocks * bsize); if (err == 0) { /* do cts for final two blocks */ err = cts_cbc_encrypt(ctx, desc, dst, src, cbc_blocks * bsize, nbytes - (cbc_blocks * bsize)); } } return err; } static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int offset, unsigned int nbytes) { int bsize = crypto_blkcipher_blocksize(desc->tfm); u8 tmp[bsize]; struct blkcipher_desc lcldesc; struct scatterlist sgsrc[1], sgdst[1]; int lastn = nbytes - bsize; u8 iv[bsize]; u8 s[bsize * 2], d[bsize * 2]; int err; if (lastn < 0) return -EINVAL; sg_init_table(sgsrc, 1); sg_init_table(sgdst, 1); scatterwalk_map_and_copy(s, src, offset, nbytes, 0); lcldesc.tfm = ctx->child; lcldesc.info = iv; lcldesc.flags = desc->flags; /* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/ memset(iv, 0, sizeof(iv)); sg_set_buf(&sgsrc[0], s, bsize); sg_set_buf(&sgdst[0], tmp, bsize); err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); if (err) return err; /* 2. Pad Cn with zeros at the end to create C of length BB */ memset(iv, 0, sizeof(iv)); memcpy(iv, s + bsize, lastn); /* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */ crypto_xor(tmp, iv, bsize); /* 4. Select the first Ln bytes of Xn (tmp) to create Pn */ memcpy(d + bsize, tmp, lastn); /* 5. 
Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); /* 6. Decrypt En to create Pn-1 */ memset(iv, 0, sizeof(iv)); sg_set_buf(&sgsrc[0], s + bsize, bsize); sg_set_buf(&sgdst[0], d, bsize); err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); /* XOR with previous block */ crypto_xor(d, desc->info, bsize); scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); memcpy(desc->info, s, bsize); return err; } static int crypto_cts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int bsize = crypto_blkcipher_blocksize(desc->tfm); int tot_blocks = (nbytes + bsize - 1) / bsize; int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; struct blkcipher_desc lcldesc; int err; lcldesc.tfm = ctx->child; lcldesc.info = desc->info; lcldesc.flags = desc->flags; if (tot_blocks == 1) { err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize); } else if (nbytes <= bsize * 2) { err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); } else { /* do normal function for tot_blocks - 2 */ err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, cbc_blocks * bsize); if (err == 0) { /* do cts for final two blocks */ err = cts_cbc_decrypt(ctx, desc, dst, src, cbc_blocks * bsize, nbytes - (cbc_blocks * bsize)); } } return err; } static int crypto_cts_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_blkcipher *cipher; cipher = crypto_spawn_blkcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; } static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) { struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_blkcipher(ctx->child); } static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) { 
struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); err = PTR_ERR(alg); if (IS_ERR(alg)) return ERR_PTR(err); inst = ERR_PTR(-EINVAL); if (!is_power_of_2(alg->cra_blocksize)) goto out_put_alg; inst = crypto_alloc_instance("cts", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; /* We access the data as u32s when xoring. */ inst->alg.cra_alignmask |= __alignof__(u32) - 1; inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; inst->alg.cra_blkcipher.geniv = "seqiv"; inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); inst->alg.cra_init = crypto_cts_init_tfm; inst->alg.cra_exit = crypto_cts_exit_tfm; inst->alg.cra_blkcipher.setkey = crypto_cts_setkey; inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt; inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void crypto_cts_free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_cts_tmpl = { .name = "cts", .alloc = crypto_cts_alloc, .free = crypto_cts_free, .module = THIS_MODULE, }; static int __init crypto_cts_module_init(void) { return crypto_register_template(&crypto_cts_tmpl); } static void __exit crypto_cts_module_exit(void) { crypto_unregister_template(&crypto_cts_tmpl); } module_init(crypto_cts_module_init); module_exit(crypto_cts_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
gpl-2.0
RenderBroken/msm8974_Victara_render_kernel
fs/nls/nls_ascii.c
12227
5874
/* * linux/fs/nls/nls_ascii.c * * Charset ascii translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 
0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static const unsigned char *const page_uni2charset[256] = { page00, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 
0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "ascii", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_ascii(void) { return register_nls(&table); } static void __exit exit_nls_ascii(void) { unregister_nls(&table); } module_init(init_nls_ascii) module_exit(exit_nls_ascii) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
af974/RedPill-Kernel
drivers/infiniband/hw/ipath/ipath_keys.c
14275
6523
/* * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <asm/io.h> #include "ipath_verbs.h" #include "ipath_kernel.h" /** * ipath_alloc_lkey - allocate an lkey * @rkt: lkey table in which to allocate the lkey * @mr: memory region that this lkey protects * * Returns 1 if successful, otherwise returns 0. 
*/ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) { unsigned long flags; u32 r; u32 n; int ret; spin_lock_irqsave(&rkt->lock, flags); /* Find the next available LKEY */ r = n = rkt->next; for (;;) { if (rkt->table[r] == NULL) break; r = (r + 1) & (rkt->max - 1); if (r == n) { spin_unlock_irqrestore(&rkt->lock, flags); ipath_dbg("LKEY table full\n"); ret = 0; goto bail; } } rkt->next = (r + 1) & (rkt->max - 1); /* * Make sure lkey is never zero which is reserved to indicate an * unrestricted LKEY. */ rkt->gen++; mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) | ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen) << 8); if (mr->lkey == 0) { mr->lkey |= 1 << 8; rkt->gen++; } rkt->table[r] = mr; spin_unlock_irqrestore(&rkt->lock, flags); ret = 1; bail: return ret; } /** * ipath_free_lkey - free an lkey * @rkt: table from which to free the lkey * @lkey: lkey id to free */ void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey) { unsigned long flags; u32 r; if (lkey == 0) return; r = lkey >> (32 - ib_ipath_lkey_table_size); spin_lock_irqsave(&rkt->lock, flags); rkt->table[r] = NULL; spin_unlock_irqrestore(&rkt->lock, flags); } /** * ipath_lkey_ok - check IB SGE for validity and initialize * @rkt: table containing lkey to check SGE against * @isge: outgoing internal SGE * @sge: SGE to check * @acc: access flags * * Return 1 if valid and successful, otherwise returns 0. * * Check the IB SGE for validity and initialize our internal version * of it. */ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, struct ib_sge *sge, int acc) { struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; struct ipath_mregion *mr; unsigned n, m; size_t off; int ret; /* * We use LKEY == zero for kernel virtual addresses * (see ipath_get_dma_mr and ipath_dma.c). 
*/ if (sge->lkey == 0) { /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { ret = 0; goto bail; } isge->mr = NULL; isge->vaddr = (void *) sge->addr; isge->length = sge->length; isge->sge_length = sge->length; ret = 1; goto bail; } mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))]; if (unlikely(mr == NULL || mr->lkey != sge->lkey || qp->ibqp.pd != mr->pd)) { ret = 0; goto bail; } off = sge->addr - mr->user_base; if (unlikely(sge->addr < mr->user_base || off + sge->length > mr->length || (mr->access_flags & acc) != acc)) { ret = 0; goto bail; } off += mr->offset; m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= IPATH_SEGSZ) { m++; n = 0; } } isge->mr = mr; isge->vaddr = mr->map[m]->segs[n].vaddr + off; isge->length = mr->map[m]->segs[n].length - off; isge->sge_length = sge->length; isge->m = m; isge->n = n; ret = 1; bail: return ret; } /** * ipath_rkey_ok - check the IB virtual address, length, and RKEY * @dev: infiniband device * @ss: SGE state * @len: length of data * @vaddr: virtual address to place data * @rkey: rkey to check * @acc: access flags * * Return 1 if successful, otherwise 0. */ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, u32 len, u64 vaddr, u32 rkey, int acc) { struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ipath_lkey_table *rkt = &dev->lk_table; struct ipath_sge *sge = &ss->sge; struct ipath_mregion *mr; unsigned n, m; size_t off; int ret; /* * We use RKEY == zero for kernel virtual addresses * (see ipath_get_dma_mr and ipath_dma.c). 
*/ if (rkey == 0) { /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { ret = 0; goto bail; } sge->mr = NULL; sge->vaddr = (void *) vaddr; sge->length = len; sge->sge_length = len; ss->sg_list = NULL; ss->num_sge = 1; ret = 1; goto bail; } mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) { ret = 0; goto bail; } off = vaddr - mr->iova; if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) { ret = 0; goto bail; } off += mr->offset; m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; if (n >= IPATH_SEGSZ) { m++; n = 0; } } sge->mr = mr; sge->vaddr = mr->map[m]->segs[n].vaddr + off; sge->length = mr->map[m]->segs[n].length - off; sge->sge_length = len; sge->m = m; sge->n = n; ss->sg_list = NULL; ss->num_sge = 1; ret = 1; bail: return ret; }
gpl-2.0
NewbyJE/android_kernel_samsung_msm8660-common
drivers/block/paride/on20.c
15555
3096
/* on20.c (c) 1996-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. on20.c is a low-level protocol driver for the Onspec 90c20 parallel to IDE adapter. */ /* Changes: 1.01 GRG 1998.05.06 init_proto, release_proto */ #define ON20_VERSION "1.01" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #define op(f) w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4); #define vl(v) w2(4);w0(v);w2(5);w2(7);w2(5);w2(4); #define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set */ static int on20_read_regr( PIA *pi, int cont, int regr ) { int h,l, r ; r = (regr<<2) + 1 + cont; op(1); vl(r); op(0); switch (pi->mode) { case 0: w2(4); w2(6); l = r1(); w2(4); w2(6); h = r1(); w2(4); w2(6); w2(4); w2(6); w2(4); return j44(l,h); case 1: w2(4); w2(0x26); r = r0(); w2(4); w2(0x26); w2(4); return r; } return -1; } static void on20_write_regr( PIA *pi, int cont, int regr, int val ) { int r; r = (regr<<2) + 1 + cont; op(1); vl(r); op(0); vl(val); op(0); vl(val); } static void on20_connect ( PIA *pi) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(4);w0(0);w2(0xc);w2(4);w2(6);w2(4);w2(6);w2(4); if (pi->mode) { op(2); vl(8); op(2); vl(9); } else { op(2); vl(0); op(2); vl(8); } } static void on20_disconnect ( PIA *pi ) { w2(4);w0(7);w2(4);w2(0xc);w2(4); w0(pi->saved_r0); w2(pi->saved_r2); } static void on20_read_block( PIA *pi, char * buf, int count ) { int k, l, h; op(1); vl(1); op(0); for (k=0;k<count;k++) if (pi->mode) { w2(4); w2(0x26); buf[k] = r0(); } else { w2(6); l = r1(); w2(4); w2(6); h = r1(); w2(4); buf[k] = j44(l,h); } w2(4); } static void on20_write_block( PIA *pi, char * buf, int count ) { int k; op(1); vl(1); op(0); for (k=0;k<count;k++) { w2(5); w0(buf[k]); w2(7); } w2(4); } static void on20_log_adapter( PIA *pi, char * scratch, int 
verbose ) { char *mode_string[2] = {"4-bit","8-bit"}; printk("%s: on20 %s, OnSpec 90c20 at 0x%x, ", pi->device,ON20_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); } static struct pi_protocol on20 = { .owner = THIS_MODULE, .name = "on20", .max_mode = 2, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = on20_write_regr, .read_regr = on20_read_regr, .write_block = on20_write_block, .read_block = on20_read_block, .connect = on20_connect, .disconnect = on20_disconnect, .log_adapter = on20_log_adapter, }; static int __init on20_init(void) { return paride_register(&on20); } static void __exit on20_exit(void) { paride_unregister(&on20); } MODULE_LICENSE("GPL"); module_init(on20_init) module_exit(on20_exit)
gpl-2.0
Snakefreak/i9100kerneljbhk
drivers/gpu/drm/nouveau/nouveau_dp.c
452
15556
/* * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_i2c.h" #include "nouveau_connector.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include "nouveau_gpio.h" /****************************************************************************** * aux channel util functions *****************************************************************************/ #define AUX_DBG(fmt, args...) do { \ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \ NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \ } \ } while (0) #define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args) static void auxch_fini(struct drm_device *dev, int ch) { nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000); } static int auxch_init(struct drm_device *dev, int ch) { const u32 unksel = 1; /* nfi which to use, or if it matters.. */ const u32 ureq = unksel ? 
0x00100000 : 0x00200000; const u32 urep = unksel ? 0x01000000 : 0x02000000; u32 ctrl, timeout; /* wait up to 1ms for any previous transaction to be done... */ timeout = 1000; do { ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); udelay(1); if (!timeout--) { AUX_ERR("begin idle timeout 0x%08x", ctrl); return -EBUSY; } } while (ctrl & 0x03010000); /* set some magic, and wait up to 1ms for it to appear */ nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq); timeout = 1000; do { ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); udelay(1); if (!timeout--) { AUX_ERR("magic wait 0x%08x\n", ctrl); auxch_fini(dev, ch); return -EBUSY; } } while ((ctrl & 0x03000000) != urep); return 0; } static int auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size) { u32 ctrl, stat, timeout, retries; u32 xbuf[4] = {}; int ret, i; AUX_DBG("%d: 0x%08x %d\n", type, addr, size); ret = auxch_init(dev, ch); if (ret) goto out; stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50)); if (!(stat & 0x10000000)) { AUX_DBG("sink not detected\n"); ret = -ENXIO; goto out; } if (!(type & 1)) { memcpy(xbuf, data, size); for (i = 0; i < 16; i += 4) { AUX_DBG("wr 0x%08x\n", xbuf[i / 4]); nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]); } } ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); ctrl &= ~0x0001f0ff; ctrl |= type << 12; ctrl |= size - 1; nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr); /* retry transaction a number of times on failure... 
*/ ret = -EREMOTEIO; for (retries = 0; retries < 32; retries++) { /* reset, and delay a while if this is a retry */ nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl); nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl); if (retries) udelay(400); /* transaction request, wait up to 1ms for it to complete */ nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl); timeout = 1000; do { ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50)); udelay(1); if (!timeout--) { AUX_ERR("tx req timeout 0x%08x\n", ctrl); goto out; } } while (ctrl & 0x00010000); /* read status, and check if transaction completed ok */ stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0); if (!(stat & 0x000f0f00)) { ret = 0; break; } AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat); } if (type & 1) { for (i = 0; i < 16; i += 4) { xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i); AUX_DBG("rd 0x%08x\n", xbuf[i / 4]); } memcpy(data, xbuf, size); } out: auxch_fini(dev, ch); return ret; } u8 * nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) { struct bit_entry d; u8 *table; int i; if (bit_table(dev, 'd', &d)) { NV_ERROR(dev, "BIT 'd' table not found\n"); return NULL; } if (d.version != 1) { NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version); return NULL; } table = ROMPTR(dev, d.data[0]); if (!table) { NV_ERROR(dev, "displayport table pointer invalid\n"); return NULL; } switch (table[0]) { case 0x20: case 0x21: case 0x30: break; default: NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]); return NULL; } for (i = 0; i < table[3]; i++) { *entry = ROMPTR(dev, table[table[1] + (i * table[2])]); if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0]))) return table; } NV_ERROR(dev, "displayport encoder table not found\n"); return NULL; } /****************************************************************************** * link training *****************************************************************************/ struct dp_state { struct dp_train_func 
*func; struct dcb_entry *dcb; int auxch; int crtc; u8 *dpcd; int link_nr; u32 link_bw; u8 stat[6]; u8 conf[4]; }; static void dp_set_link_config(struct drm_device *dev, struct dp_state *dp) { u8 sink[2]; NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); /* set desired link configuration on the source */ dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); /* inform the sink of the new configuration */ sink[0] = dp->link_bw / 27000; sink[1] = dp->link_nr; if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); } static void dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) { u8 sink_tp; NV_DEBUG_KMS(dev, "training pattern %d\n", pattern); dp->func->train_set(dev, dp->dcb, pattern); auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); sink_tp &= ~DP_TRAINING_PATTERN_MASK; sink_tp |= pattern; auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); } static int dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) { int i; for (i = 0; i < dp->link_nr; i++) { u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; u8 lpre = (lane & 0x0c) >> 2; u8 lvsw = (lane & 0x03) >> 0; dp->conf[i] = (lpre << 3) | lvsw; if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200) dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED; if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5) dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); } return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); } static int dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) { int ret; udelay(delay); ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6); if (ret) return ret; NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n", dp->stat[0], 
dp->stat[1], dp->stat[2], dp->stat[3], dp->stat[4], dp->stat[5]); return 0; } static int dp_link_train_cr(struct drm_device *dev, struct dp_state *dp) { bool cr_done = false, abort = false; int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK; int tries = 0, i; dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1); do { if (dp_link_train_commit(dev, dp) || dp_link_train_update(dev, dp, 100)) break; cr_done = true; for (i = 0; i < dp->link_nr; i++) { u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; if (!(lane & DP_LANE_CR_DONE)) { cr_done = false; if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED) abort = true; break; } } if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK; tries = 0; } } while (!cr_done && !abort && ++tries < 5); return cr_done ? 0 : -1; } static int dp_link_train_eq(struct drm_device *dev, struct dp_state *dp) { bool eq_done, cr_done = true; int tries = 0, i; dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2); do { if (dp_link_train_update(dev, dp, 400)) break; eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE); for (i = 0; i < dp->link_nr && eq_done; i++) { u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; if (!(lane & DP_LANE_CR_DONE)) cr_done = false; if (!(lane & DP_LANE_CHANNEL_EQ_DONE) || !(lane & DP_LANE_SYMBOL_LOCKED)) eq_done = false; } if (dp_link_train_commit(dev, dp)) break; } while (!eq_done && cr_done && ++tries <= 5); return eq_done ? 
0 : -1; } static void dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) { u16 script = 0x0000; u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); if (table) { if (table[0] >= 0x20 && table[0] <= 0x30) { if (enable) script = ROM16(entry[12]); else script = ROM16(entry[14]); } } nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static void dp_link_train_init(struct drm_device *dev, struct dp_state *dp) { u16 script = 0x0000; u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); if (table) { if (table[0] >= 0x20 && table[0] <= 0x30) script = ROM16(entry[6]); } nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static void dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) { u16 script = 0x0000; u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); if (table) { if (table[0] >= 0x20 && table[0] <= 0x30) script = ROM16(entry[8]); } nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } bool nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); struct drm_device *dev = encoder->dev; struct nouveau_i2c_chan *auxch; const u32 bw_list[] = { 270000, 162000, 0 }; const u32 *link_bw = bw_list; struct dp_state dp; auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); if (!auxch) return false; dp.func = func; dp.dcb = nv_encoder->dcb; dp.crtc = nv_crtc->index; dp.auxch = auxch->drive; dp.dpcd = nv_encoder->dp.dpcd; /* adjust required bandwidth for 8B/10B coding overhead */ datarate = (datarate / 8) * 10; /* some sinks toggle hotplug in response to some of the actions * we take during link training (DP_SET_POWER is one), we need * to ignore them for the moment to avoid races. 
*/ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false); /* enable down-spreading, if possible */ dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); /* execute pre-train script from vbios */ dp_link_train_init(dev, &dp); /* start off at highest link rate supported by encoder and display */ while (*link_bw > nv_encoder->dp.link_bw) link_bw++; while (link_bw[0]) { /* find minimum required lane count at this link rate */ dp.link_nr = nv_encoder->dp.link_nr; while ((dp.link_nr >> 1) * link_bw[0] > datarate) dp.link_nr >>= 1; /* drop link rate to minimum with this lane count */ while ((link_bw[1] * dp.link_nr) > datarate) link_bw++; dp.link_bw = link_bw[0]; /* program selected link configuration */ dp_set_link_config(dev, &dp); /* attempt to train the link at this configuration */ memset(dp.stat, 0x00, sizeof(dp.stat)); if (!dp_link_train_cr(dev, &dp) && !dp_link_train_eq(dev, &dp)) break; /* retry at lower rate */ link_bw++; } /* finish link training */ dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE); /* execute post-train script from vbios */ dp_link_train_fini(dev, &dp); /* re-enable hotplug detect */ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true); return true; } void nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_i2c_chan *auxch; u8 status; auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index); if (!auxch) return; if (mode == DRM_MODE_DPMS_ON) status = DP_SET_POWER_D0; else status = DP_SET_POWER_D3; nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); if (mode == DRM_MODE_DPMS_ON) nouveau_dp_link_train(encoder, datarate, func); } bool nouveau_dp_detect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; struct nouveau_i2c_chan *auxch; u8 *dpcd = nv_encoder->dp.dpcd; int ret; auxch = nouveau_i2c_find(dev, 
nv_encoder->dcb->i2c_index); if (!auxch) return false; ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8); if (ret) return false; nv_encoder->dp.link_bw = 27000 * dpcd[1]; nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n", nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]); NV_DEBUG_KMS(dev, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr, nv_encoder->dcb->dpconf.link_bw); if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr) nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw) nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw; NV_DEBUG_KMS(dev, "maximum: %dx%d\n", nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); return true; } int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, uint8_t *data, int data_nr) { return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr); } static int nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap; struct i2c_msg *msg = msgs; int ret, mcnt = num; while (mcnt--) { u8 remaining = msg->len; u8 *ptr = msg->buf; while (remaining) { u8 cnt = (remaining > 16) ? 16 : remaining; u8 cmd; if (msg->flags & I2C_M_RD) cmd = AUX_I2C_READ; else cmd = AUX_I2C_WRITE; if (mcnt || remaining > 16) cmd |= AUX_I2C_MOT; ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt); if (ret < 0) return ret; ptr += cnt; remaining -= cnt; } msg++; } return num; } static u32 nouveau_dp_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } const struct i2c_algorithm nouveau_dp_i2c_algo = { .master_xfer = nouveau_dp_i2c_xfer, .functionality = nouveau_dp_i2c_func };
gpl-2.0
identisoft-rashid/ec3_kernel_pre_4.1
sound/pci/asihpi/hpi6000.c
1476
49832
/****************************************************************************** AudioScience HPI driver Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation; This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Hardware Programming Interface (HPI) for AudioScience ASI6200 series adapters. These PCI bus adapters are based on the TI C6711 DSP. Exported functions: void HPI_6000(struct hpi_message *phm, struct hpi_response *phr) #defines HIDE_PCI_ASSERTS to show the PCI asserts PROFILE_DSP2 get profile data from DSP2 if present (instead of DSP 1) (C) Copyright AudioScience Inc. 
1998-2003 *******************************************************************************/ #define SOURCEFILE_NAME "hpi6000.c" #include "hpi_internal.h" #include "hpimsginit.h" #include "hpidebug.h" #include "hpi6000.h" #include "hpidspcd.h" #include "hpicmn.h" #define HPI_HIF_BASE (0x00000200) /* start of C67xx internal RAM */ #define HPI_HIF_ADDR(member) \ (HPI_HIF_BASE + offsetof(struct hpi_hif_6000, member)) #define HPI_HIF_ERROR_MASK 0x4000 /* HPI6000 specific error codes */ #define HPI6000_ERROR_BASE 900 /* not actually used anywhere */ /* operational/messaging errors */ #define HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT 901 #define HPI6000_ERROR_RESP_GET_LEN 902 #define HPI6000_ERROR_MSG_RESP_GET_RESP_ACK 903 #define HPI6000_ERROR_MSG_GET_ADR 904 #define HPI6000_ERROR_RESP_GET_ADR 905 #define HPI6000_ERROR_MSG_RESP_BLOCKWRITE32 906 #define HPI6000_ERROR_MSG_RESP_BLOCKREAD32 907 #define HPI6000_ERROR_CONTROL_CACHE_PARAMS 909 #define HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT 911 #define HPI6000_ERROR_SEND_DATA_ACK 912 #define HPI6000_ERROR_SEND_DATA_ADR 913 #define HPI6000_ERROR_SEND_DATA_TIMEOUT 914 #define HPI6000_ERROR_SEND_DATA_CMD 915 #define HPI6000_ERROR_SEND_DATA_WRITE 916 #define HPI6000_ERROR_SEND_DATA_IDLECMD 917 #define HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT 921 #define HPI6000_ERROR_GET_DATA_ACK 922 #define HPI6000_ERROR_GET_DATA_CMD 923 #define HPI6000_ERROR_GET_DATA_READ 924 #define HPI6000_ERROR_GET_DATA_IDLECMD 925 #define HPI6000_ERROR_CONTROL_CACHE_ADDRLEN 951 #define HPI6000_ERROR_CONTROL_CACHE_READ 952 #define HPI6000_ERROR_CONTROL_CACHE_FLUSH 953 #define HPI6000_ERROR_MSG_RESP_GETRESPCMD 961 #define HPI6000_ERROR_MSG_RESP_IDLECMD 962 /* Initialisation/bootload errors */ #define HPI6000_ERROR_UNHANDLED_SUBSYS_ID 930 /* can't access PCI2040 */ #define HPI6000_ERROR_INIT_PCI2040 931 /* can't access DSP HPI i/f */ #define HPI6000_ERROR_INIT_DSPHPI 932 /* can't access internal DSP memory */ #define HPI6000_ERROR_INIT_DSPINTMEM 933 /* can't access SDRAM - 
test#1 */ #define HPI6000_ERROR_INIT_SDRAM1 934 /* can't access SDRAM - test#2 */ #define HPI6000_ERROR_INIT_SDRAM2 935 #define HPI6000_ERROR_INIT_VERIFY 938 #define HPI6000_ERROR_INIT_NOACK 939 #define HPI6000_ERROR_INIT_PLDTEST1 941 #define HPI6000_ERROR_INIT_PLDTEST2 942 /* local defines */ #define HIDE_PCI_ASSERTS #define PROFILE_DSP2 /* for PCI2040 i/f chip */ /* HPI CSR registers */ /* word offsets from CSR base */ /* use when io addresses defined as u32 * */ #define INTERRUPT_EVENT_SET 0 #define INTERRUPT_EVENT_CLEAR 1 #define INTERRUPT_MASK_SET 2 #define INTERRUPT_MASK_CLEAR 3 #define HPI_ERROR_REPORT 4 #define HPI_RESET 5 #define HPI_DATA_WIDTH 6 #define MAX_DSPS 2 /* HPI registers, spaced 8K bytes = 2K words apart */ #define DSP_SPACING 0x800 #define CONTROL 0x0000 #define ADDRESS 0x0200 #define DATA_AUTOINC 0x0400 #define DATA 0x0600 #define TIMEOUT 500000 struct dsp_obj { __iomem u32 *prHPI_control; __iomem u32 *prHPI_address; __iomem u32 *prHPI_data; __iomem u32 *prHPI_data_auto_inc; char c_dsp_rev; /*A, B */ u32 control_cache_address_on_dsp; u32 control_cache_length_on_dsp; struct hpi_adapter_obj *pa_parent_adapter; }; struct hpi_hw_obj { __iomem u32 *dw2040_HPICSR; __iomem u32 *dw2040_HPIDSP; u16 num_dsp; struct dsp_obj ado[MAX_DSPS]; u32 message_buffer_address_on_dsp; u32 response_buffer_address_on_dsp; u32 pCI2040HPI_error_count; struct hpi_control_cache_single control_cache[HPI_NMIXER_CONTROLS]; struct hpi_control_cache *p_cache; }; static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *source, u32 count); static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *dest, u32 count); static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao, u32 *pos_error_code); static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao, u16 read_or_write); #define H6READ 1 #define H6WRITE 0 static short hpi6000_update_control_cache(struct 
hpi_adapter_obj *pao, struct hpi_message *phm); static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index, u32 ack_value); static short hpi6000_send_host_command(struct hpi_adapter_obj *pao, u16 dsp_index, u32 host_cmd); static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo); static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr); static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data); static u32 hpi_read_word(struct dsp_obj *pdo, u32 address); static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length); static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata, u32 length); static void subsys_create_adapter(struct hpi_message *phm, struct hpi_response *phr); static void adapter_delete(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static void adapter_get_asserts(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr); static short create_adapter_obj(struct hpi_adapter_obj *pao, u32 *pos_error_code); static void delete_adapter_obj(struct hpi_adapter_obj *pao); /* local globals */ static u16 gw_pci_read_asserts; /* used to count PCI2040 errors */ static u16 gw_pci_write_asserts; /* used to count PCI2040 errors */ static void subsys_message(struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_SUBSYS_CREATE_ADAPTER: subsys_create_adapter(phm, phr); break; default: phr->error = HPI_ERROR_INVALID_FUNC; break; } } static void control_message(struct hpi_adapter_obj 
*pao, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; switch (phm->function) { case HPI_CONTROL_GET_STATE: if (pao->has_control_cache) { u16 err; err = hpi6000_update_control_cache(pao, phm); if (err) { if (err >= HPI_ERROR_BACKEND_BASE) { phr->error = HPI_ERROR_CONTROL_CACHING; phr->specific_error = err; } else { phr->error = err; } break; } if (hpi_check_control_cache(phw->p_cache, phm, phr)) break; } hw_message(pao, phm, phr); break; case HPI_CONTROL_SET_STATE: hw_message(pao, phm, phr); hpi_cmn_control_cache_sync_to_msg(phw->p_cache, phm, phr); break; case HPI_CONTROL_GET_INFO: default: hw_message(pao, phm, phr); break; } } static void adapter_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_ADAPTER_GET_ASSERT: adapter_get_asserts(pao, phm, phr); break; case HPI_ADAPTER_DELETE: adapter_delete(pao, phm, phr); break; default: hw_message(pao, phm, phr); break; } } static void outstream_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_OSTREAM_HOSTBUFFER_ALLOC: case HPI_OSTREAM_HOSTBUFFER_FREE: /* Don't let these messages go to the HW function because * they're called without locking the spinlock. * For the HPI6000 adapters the HW would return * HPI_ERROR_INVALID_FUNC anyway. */ phr->error = HPI_ERROR_INVALID_FUNC; break; default: hw_message(pao, phm, phr); return; } } static void instream_message(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { switch (phm->function) { case HPI_ISTREAM_HOSTBUFFER_ALLOC: case HPI_ISTREAM_HOSTBUFFER_FREE: /* Don't let these messages go to the HW function because * they're called without locking the spinlock. * For the HPI6000 adapters the HW would return * HPI_ERROR_INVALID_FUNC anyway. 
*/ phr->error = HPI_ERROR_INVALID_FUNC; break; default: hw_message(pao, phm, phr); return; } } /************************************************************************/ /** HPI_6000() * Entry point from HPIMAN * All calls to the HPI start here */ void HPI_6000(struct hpi_message *phm, struct hpi_response *phr) { struct hpi_adapter_obj *pao = NULL; if (phm->object != HPI_OBJ_SUBSYSTEM) { pao = hpi_find_adapter(phm->adapter_index); if (!pao) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_BAD_ADAPTER_NUMBER); HPI_DEBUG_LOG(DEBUG, "invalid adapter index: %d \n", phm->adapter_index); return; } /* Don't even try to communicate with crashed DSP */ if (pao->dsp_crashed >= 10) { hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_DSP_HARDWARE); HPI_DEBUG_LOG(DEBUG, "adapter %d dsp crashed\n", phm->adapter_index); return; } } /* Init default response including the size field */ if (phm->function != HPI_SUBSYS_CREATE_ADAPTER) hpi_init_response(phr, phm->object, phm->function, HPI_ERROR_PROCESSING_MESSAGE); switch (phm->type) { case HPI_TYPE_REQUEST: switch (phm->object) { case HPI_OBJ_SUBSYSTEM: subsys_message(phm, phr); break; case HPI_OBJ_ADAPTER: phr->size = sizeof(struct hpi_response_header) + sizeof(struct hpi_adapter_res); adapter_message(pao, phm, phr); break; case HPI_OBJ_CONTROL: control_message(pao, phm, phr); break; case HPI_OBJ_OSTREAM: outstream_message(pao, phm, phr); break; case HPI_OBJ_ISTREAM: instream_message(pao, phm, phr); break; default: hw_message(pao, phm, phr); break; } break; default: phr->error = HPI_ERROR_INVALID_TYPE; break; } } /************************************************************************/ /* SUBSYSTEM */ /* create an adapter object and initialise it based on resource information * passed in in the message * NOTE - you cannot use this function AND the FindAdapters function at the * same time, the application must use only one of them to get the adapters */ static void subsys_create_adapter(struct hpi_message 
*phm, struct hpi_response *phr) { /* create temp adapter obj, because we don't know what index yet */ struct hpi_adapter_obj ao; struct hpi_adapter_obj *pao; u32 os_error_code; u16 err = 0; u32 dsp_index = 0; HPI_DEBUG_LOG(VERBOSE, "subsys_create_adapter\n"); memset(&ao, 0, sizeof(ao)); ao.priv = kzalloc(sizeof(struct hpi_hw_obj), GFP_KERNEL); if (!ao.priv) { HPI_DEBUG_LOG(ERROR, "can't get mem for adapter object\n"); phr->error = HPI_ERROR_MEMORY_ALLOC; return; } /* create the adapter object based on the resource information */ ao.pci = *phm->u.s.resource.r.pci; err = create_adapter_obj(&ao, &os_error_code); if (err) { delete_adapter_obj(&ao); if (err >= HPI_ERROR_BACKEND_BASE) { phr->error = HPI_ERROR_DSP_BOOTLOAD; phr->specific_error = err; } else { phr->error = err; } phr->u.s.data = os_error_code; return; } /* need to update paParentAdapter */ pao = hpi_find_adapter(ao.index); if (!pao) { /* We just added this adapter, why can't we find it!? */ HPI_DEBUG_LOG(ERROR, "lost adapter after boot\n"); phr->error = HPI_ERROR_BAD_ADAPTER; return; } for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) { struct hpi_hw_obj *phw = pao->priv; phw->ado[dsp_index].pa_parent_adapter = pao; } phr->u.s.adapter_type = ao.type; phr->u.s.adapter_index = ao.index; phr->error = 0; } static void adapter_delete(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { delete_adapter_obj(pao); hpi_delete_adapter(pao); phr->error = 0; } /* this routine is called from SubSysFindAdapter and SubSysCreateAdapter */ static short create_adapter_obj(struct hpi_adapter_obj *pao, u32 *pos_error_code) { short boot_error = 0; u32 dsp_index = 0; u32 control_cache_size = 0; u32 control_cache_count = 0; struct hpi_hw_obj *phw = pao->priv; /* The PCI2040 has the following address map */ /* BAR0 - 4K = HPI control and status registers on PCI2040 (HPI CSR) */ /* BAR1 - 32K = HPI registers on DSP */ phw->dw2040_HPICSR = pao->pci.ap_mem_base[0]; phw->dw2040_HPIDSP = 
pao->pci.ap_mem_base[1]; HPI_DEBUG_LOG(VERBOSE, "csr %p, dsp %p\n", phw->dw2040_HPICSR, phw->dw2040_HPIDSP); /* set addresses for the possible DSP HPI interfaces */ for (dsp_index = 0; dsp_index < MAX_DSPS; dsp_index++) { phw->ado[dsp_index].prHPI_control = phw->dw2040_HPIDSP + (CONTROL + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_address = phw->dw2040_HPIDSP + (ADDRESS + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_data = phw->dw2040_HPIDSP + (DATA + DSP_SPACING * dsp_index); phw->ado[dsp_index].prHPI_data_auto_inc = phw->dw2040_HPIDSP + (DATA_AUTOINC + DSP_SPACING * dsp_index); HPI_DEBUG_LOG(VERBOSE, "ctl %p, adr %p, dat %p, dat++ %p\n", phw->ado[dsp_index].prHPI_control, phw->ado[dsp_index].prHPI_address, phw->ado[dsp_index].prHPI_data, phw->ado[dsp_index].prHPI_data_auto_inc); phw->ado[dsp_index].pa_parent_adapter = pao; } phw->pCI2040HPI_error_count = 0; pao->has_control_cache = 0; /* Set the default number of DSPs on this card */ /* This is (conditionally) adjusted after bootloading */ /* of the first DSP in the bootload section. 
*/ phw->num_dsp = 1; boot_error = hpi6000_adapter_boot_load_dsp(pao, pos_error_code); if (boot_error) return boot_error; HPI_DEBUG_LOG(INFO, "bootload DSP OK\n"); phw->message_buffer_address_on_dsp = 0L; phw->response_buffer_address_on_dsp = 0L; /* get info about the adapter by asking the adapter */ /* send a HPI_ADAPTER_GET_INFO message */ { struct hpi_message hm; struct hpi_response hr0; /* response from DSP 0 */ struct hpi_response hr1; /* response from DSP 1 */ u16 error = 0; HPI_DEBUG_LOG(VERBOSE, "send ADAPTER_GET_INFO\n"); memset(&hm, 0, sizeof(hm)); hm.type = HPI_TYPE_REQUEST; hm.size = sizeof(struct hpi_message); hm.object = HPI_OBJ_ADAPTER; hm.function = HPI_ADAPTER_GET_INFO; hm.adapter_index = 0; memset(&hr0, 0, sizeof(hr0)); memset(&hr1, 0, sizeof(hr1)); hr0.size = sizeof(hr0); hr1.size = sizeof(hr1); error = hpi6000_message_response_sequence(pao, 0, &hm, &hr0); if (hr0.error) { HPI_DEBUG_LOG(DEBUG, "message error %d\n", hr0.error); return hr0.error; } if (phw->num_dsp == 2) { error = hpi6000_message_response_sequence(pao, 1, &hm, &hr1); if (error) return error; } pao->type = hr0.u.ax.info.adapter_type; pao->index = hr0.u.ax.info.adapter_index; } memset(&phw->control_cache[0], 0, sizeof(struct hpi_control_cache_single) * HPI_NMIXER_CONTROLS); /* Read the control cache length to figure out if it is turned on */ control_cache_size = hpi_read_word(&phw->ado[0], HPI_HIF_ADDR(control_cache_size_in_bytes)); if (control_cache_size) { control_cache_count = hpi_read_word(&phw->ado[0], HPI_HIF_ADDR(control_cache_count)); phw->p_cache = hpi_alloc_control_cache(control_cache_count, control_cache_size, (unsigned char *) &phw->control_cache[0] ); if (phw->p_cache) pao->has_control_cache = 1; } HPI_DEBUG_LOG(DEBUG, "get adapter info ASI%04X index %d\n", pao->type, pao->index); if (phw->p_cache) phw->p_cache->adap_idx = pao->index; return hpi_add_adapter(pao); } static void delete_adapter_obj(struct hpi_adapter_obj *pao) { struct hpi_hw_obj *phw = pao->priv; if 
(pao->has_control_cache) hpi_free_control_cache(phw->p_cache); /* reset DSPs on adapter */ iowrite32(0x0003000F, phw->dw2040_HPICSR + HPI_RESET); kfree(phw); } /************************************************************************/ /* ADAPTER */ static void adapter_get_asserts(struct hpi_adapter_obj *pao, struct hpi_message *phm, struct hpi_response *phr) { #ifndef HIDE_PCI_ASSERTS /* if we have PCI2040 asserts then collect them */ if ((gw_pci_read_asserts > 0) || (gw_pci_write_asserts > 0)) { phr->u.ax.assert.p1 = gw_pci_read_asserts * 100 + gw_pci_write_asserts; phr->u.ax.assert.p2 = 0; phr->u.ax.assert.count = 1; /* assert count */ phr->u.ax.assert.dsp_index = -1; /* "dsp index" */ strcpy(phr->u.ax.assert.sz_message, "PCI2040 error"); phr->u.ax.assert.dsp_msg_addr = 0; gw_pci_read_asserts = 0; gw_pci_write_asserts = 0; phr->error = 0; } else #endif hw_message(pao, phm, phr); /*get DSP asserts */ return; } /************************************************************************/ /* LOW-LEVEL */ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao, u32 *pos_error_code) { struct hpi_hw_obj *phw = pao->priv; short error; u32 timeout; u32 read = 0; u32 i = 0; u32 data = 0; u32 j = 0; u32 test_addr = 0x80000000; u32 test_data = 0x00000001; u32 dw2040_reset = 0; u32 dsp_index = 0; u32 endian = 0; u32 adapter_info = 0; u32 delay = 0; struct dsp_code dsp_code; u16 boot_load_family = 0; /* NOTE don't use wAdapterType in this routine. 
It is not setup yet */ switch (pao->pci.pci_dev->subsystem_device) { case 0x5100: case 0x5110: /* ASI5100 revB or higher with C6711D */ case 0x5200: /* ASI5200 PCIe version of ASI5100 */ case 0x6100: case 0x6200: boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200); break; default: return HPI6000_ERROR_UNHANDLED_SUBSYS_ID; } /* reset all DSPs, indicate two DSPs are present * set RST3-=1 to disconnect HAD8 to set DSP in little endian mode */ endian = 0; dw2040_reset = 0x0003000F; iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /* read back register to make sure PCI2040 chip is functioning * note that bits 4..15 are read-only and so should always return zero, * even though we wrote 1 to them */ hpios_delay_micro_seconds(1000); delay = ioread32(phw->dw2040_HPICSR + HPI_RESET); if (delay != dw2040_reset) { HPI_DEBUG_LOG(ERROR, "INIT_PCI2040 %x %x\n", dw2040_reset, delay); return HPI6000_ERROR_INIT_PCI2040; } /* Indicate that DSP#0,1 is a C6X */ iowrite32(0x00000003, phw->dw2040_HPICSR + HPI_DATA_WIDTH); /* set Bit30 and 29 - which will prevent Target aborts from being * issued upon HPI or GP error */ iowrite32(0x60000000, phw->dw2040_HPICSR + INTERRUPT_MASK_SET); /* isolate DSP HAD8 line from PCI2040 so that * Little endian can be set by pullup */ dw2040_reset = dw2040_reset & (~(endian << 3)); iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); phw->ado[0].c_dsp_rev = 'B'; /* revB */ phw->ado[1].c_dsp_rev = 'B'; /* revB */ /*Take both DSPs out of reset, setting HAD8 to the correct Endian */ dw2040_reset = dw2040_reset & (~0x00000001); /* start DSP 0 */ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); dw2040_reset = dw2040_reset & (~0x00000002); /* start DSP 1 */ iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /* set HAD8 back to PCI2040, now that DSP set to little endian mode */ dw2040_reset = dw2040_reset & (~0x00000008); iowrite32(dw2040_reset, phw->dw2040_HPICSR + HPI_RESET); /*delay to allow DSP to get going */ 
hpios_delay_micro_seconds(100); /* loop through all DSPs, downloading DSP code */ for (dsp_index = 0; dsp_index < phw->num_dsp; dsp_index++) { struct dsp_obj *pdo = &phw->ado[dsp_index]; /* configure DSP so that we download code into the SRAM */ /* set control reg for little endian, HWOB=1 */ iowrite32(0x00010001, pdo->prHPI_control); /* test access to the HPI address register (HPIA) */ test_data = 0x00000001; for (j = 0; j < 32; j++) { iowrite32(test_data, pdo->prHPI_address); data = ioread32(pdo->prHPI_address); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "INIT_DSPHPI %x %x %x\n", test_data, data, dsp_index); return HPI6000_ERROR_INIT_DSPHPI; } test_data = test_data << 1; } /* if C6713 the setup PLL to generate 225MHz from 25MHz. * Since the PLLDIV1 read is sometimes wrong, even on a C6713, * we're going to do this unconditionally */ /* PLLDIV1 should have a value of 8000 after reset */ /* if (HpiReadWord(pdo,0x01B7C118) == 0x8000) */ { /* C6713 datasheet says we cannot program PLL from HPI, * and indeed if we try to set the PLL multiply from the * HPI, the PLL does not seem to lock, * so we enable the PLL and use the default of x 7 */ /* bypass PLL */ hpi_write_word(pdo, 0x01B7C100, 0x0000); hpios_delay_micro_seconds(100); /* ** use default of PLL x7 ** */ /* EMIF = 225/3=75MHz */ hpi_write_word(pdo, 0x01B7C120, 0x8002); hpios_delay_micro_seconds(100); /* peri = 225/2 */ hpi_write_word(pdo, 0x01B7C11C, 0x8001); hpios_delay_micro_seconds(100); /* cpu = 225/1 */ hpi_write_word(pdo, 0x01B7C118, 0x8000); /* ~2ms delay */ hpios_delay_micro_seconds(2000); /* PLL not bypassed */ hpi_write_word(pdo, 0x01B7C100, 0x0001); /* ~2ms delay */ hpios_delay_micro_seconds(2000); } /* test r/w to internal DSP memory * C6711 has L2 cache mapped to 0x0 when reset * * revB - because of bug 3.0.1 last HPI read * (before HPI address issued) must be non-autoinc */ /* test each bit in the 32bit word */ for (i = 0; i < 100; i++) { test_addr = 0x00000000; test_data = 0x00000001; for (j = 
0; j < 32; j++) { hpi_write_word(pdo, test_addr + i, test_data); data = hpi_read_word(pdo, test_addr + i); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP mem %x %x %x %x\n", test_addr + i, test_data, data, dsp_index); return HPI6000_ERROR_INIT_DSPINTMEM; } test_data = test_data << 1; } } /* memory map of ASI6200 00000000-0000FFFF 16Kx32 internal program 01800000-019FFFFF Internal peripheral 80000000-807FFFFF CE0 2Mx32 SDRAM running @ 100MHz 90000000-9000FFFF CE1 Async peripherals: EMIF config ------------ Global EMIF control 0 - 1 - 2 - 3 CLK2EN = 1 CLKOUT2 enabled 4 CLK1EN = 0 CLKOUT1 disabled 5 EKEN = 1 <--!! C6713 specific, enables ECLKOUT 6 - 7 NOHOLD = 1 external HOLD disabled 8 HOLDA = 0 HOLDA output is low 9 HOLD = 0 HOLD input is low 10 ARDY = 1 ARDY input is high 11 BUSREQ = 0 BUSREQ output is low 12,13 Reserved = 1 */ hpi_write_word(pdo, 0x01800000, 0x34A8); /* EMIF CE0 setup - 2Mx32 Sync DRAM 31..28 Wr setup 27..22 Wr strobe 21..20 Wr hold 19..16 Rd setup 15..14 - 13..8 Rd strobe 7..4 MTYPE 0011 Sync DRAM 32bits 3 Wr hold MSB 2..0 Rd hold */ hpi_write_word(pdo, 0x01800008, 0x00000030); /* EMIF SDRAM Extension 31-21 0 20 WR2RD = 0 19-18 WR2DEAC = 1 17 WR2WR = 0 16-15 R2WDQM = 2 14-12 RD2WR = 4 11-10 RD2DEAC = 1 9 RD2RD = 1 8-7 THZP = 10b 6-5 TWR = 2-1 = 01b (tWR = 10ns) 4 TRRD = 0b = 2 ECLK (tRRD = 14ns) 3-1 TRAS = 5-1 = 100b (Tras=42ns = 5 ECLK) 1 CAS latency = 3 ECLK (for Micron 2M32-7 operating at 100Mhz) */ /* need to use this else DSP code crashes */ hpi_write_word(pdo, 0x01800020, 0x001BDF29); /* EMIF SDRAM control - set up for a 2Mx32 SDRAM (512x32x4 bank) 31 - - 30 SDBSZ 1 4 bank 29..28 SDRSZ 00 11 row address pins 27..26 SDCSZ 01 8 column address pins 25 RFEN 1 refersh enabled 24 INIT 1 init SDRAM 23..20 TRCD 0001 19..16 TRP 0001 15..12 TRC 0110 11..0 - - */ /* need to use this else DSP code crashes */ hpi_write_word(pdo, 0x01800018, 0x47117000); /* EMIF SDRAM Refresh Timing */ hpi_write_word(pdo, 0x0180001C, 0x00000410); /*MIF CE1 setup - 
Async peripherals @100MHz bus speed, each cycle is 10ns, 31..28 Wr setup = 1 27..22 Wr strobe = 3 30ns 21..20 Wr hold = 1 19..16 Rd setup =1 15..14 Ta = 2 13..8 Rd strobe = 3 30ns 7..4 MTYPE 0010 Async 32bits 3 Wr hold MSB =0 2..0 Rd hold = 1 */ { u32 cE1 = (1L << 28) | (3L << 22) | (1L << 20) | (1L << 16) | (2L << 14) | (3L << 8) | (2L << 4) | 1L; hpi_write_word(pdo, 0x01800004, cE1); } /* delay a little to allow SDRAM and DSP to "get going" */ hpios_delay_micro_seconds(1000); /* test access to SDRAM */ { test_addr = 0x80000000; test_data = 0x00000001; /* test each bit in the 32bit word */ for (j = 0; j < 32; j++) { hpi_write_word(pdo, test_addr, test_data); data = hpi_read_word(pdo, test_addr); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP dram %x %x %x %x\n", test_addr, test_data, data, dsp_index); return HPI6000_ERROR_INIT_SDRAM1; } test_data = test_data << 1; } /* test every Nth address in the DRAM */ #define DRAM_SIZE_WORDS 0x200000 /*2_mx32 */ #define DRAM_INC 1024 test_addr = 0x80000000; test_data = 0x0; for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) { hpi_write_word(pdo, test_addr + i, test_data); test_data++; } test_addr = 0x80000000; test_data = 0x0; for (i = 0; i < DRAM_SIZE_WORDS; i = i + DRAM_INC) { data = hpi_read_word(pdo, test_addr + i); if (data != test_data) { HPI_DEBUG_LOG(ERROR, "DSP dram %x %x %x %x\n", test_addr + i, test_data, data, dsp_index); return HPI6000_ERROR_INIT_SDRAM2; } test_data++; } } /* write the DSP code down into the DSPs memory */ error = hpi_dsp_code_open(boot_load_family, pao->pci.pci_dev, &dsp_code, pos_error_code); if (error) return error; while (1) { u32 length; u32 address; u32 type; u32 *pcode; error = hpi_dsp_code_read_word(&dsp_code, &length); if (error) break; if (length == 0xFFFFFFFF) break; /* end of code */ error = hpi_dsp_code_read_word(&dsp_code, &address); if (error) break; error = hpi_dsp_code_read_word(&dsp_code, &type); if (error) break; error = hpi_dsp_code_read_block(length, &dsp_code, &pcode); 
if (error) break; error = hpi6000_dsp_block_write32(pao, (u16)dsp_index, address, pcode, length); if (error) break; } if (error) { hpi_dsp_code_close(&dsp_code); return error; } /* verify that code was written correctly */ /* this time through, assume no errors in DSP code file/array */ hpi_dsp_code_rewind(&dsp_code); while (1) { u32 length; u32 address; u32 type; u32 *pcode; hpi_dsp_code_read_word(&dsp_code, &length); if (length == 0xFFFFFFFF) break; /* end of code */ hpi_dsp_code_read_word(&dsp_code, &address); hpi_dsp_code_read_word(&dsp_code, &type); hpi_dsp_code_read_block(length, &dsp_code, &pcode); for (i = 0; i < length; i++) { data = hpi_read_word(pdo, address); if (data != *pcode) { error = HPI6000_ERROR_INIT_VERIFY; HPI_DEBUG_LOG(ERROR, "DSP verify %x %x %x %x\n", address, *pcode, data, dsp_index); break; } pcode++; address += 4; } if (error) break; } hpi_dsp_code_close(&dsp_code); if (error) return error; /* zero out the hostmailbox */ { u32 address = HPI_HIF_ADDR(host_cmd); for (i = 0; i < 4; i++) { hpi_write_word(pdo, address, 0); address += 4; } } /* write the DSP number into the hostmailbox */ /* structure before starting the DSP */ hpi_write_word(pdo, HPI_HIF_ADDR(dsp_number), dsp_index); /* write the DSP adapter Info into the */ /* hostmailbox before starting the DSP */ if (dsp_index > 0) hpi_write_word(pdo, HPI_HIF_ADDR(adapter_info), adapter_info); /* step 3. Start code by sending interrupt */ iowrite32(0x00030003, pdo->prHPI_control); hpios_delay_micro_seconds(10000); /* wait for a non-zero value in hostcmd - * indicating initialization is complete * * Init could take a while if DSP checks SDRAM memory * Was 200000. Increased to 2000000 for ASI8801 so we * don't get 938 errors. 
*/ timeout = 2000000; while (timeout) { do { read = hpi_read_word(pdo, HPI_HIF_ADDR(host_cmd)); } while (--timeout && hpi6000_check_PCI2040_error_flag(pao, H6READ)); if (read) break; /* The following is a workaround for bug #94: * Bluescreen on install and subsequent boots on a * DELL PowerEdge 600SC PC with 1.8GHz P4 and * ServerWorks chipset. Without this delay the system * locks up with a bluescreen (NOT GPF or pagefault). */ else hpios_delay_micro_seconds(10000); } if (timeout == 0) return HPI6000_ERROR_INIT_NOACK; /* read the DSP adapter Info from the */ /* hostmailbox structure after starting the DSP */ if (dsp_index == 0) { /*u32 dwTestData=0; */ u32 mask = 0; adapter_info = hpi_read_word(pdo, HPI_HIF_ADDR(adapter_info)); if (HPI_ADAPTER_FAMILY_ASI (HPI_HIF_ADAPTER_INFO_EXTRACT_ADAPTER (adapter_info)) == HPI_ADAPTER_FAMILY_ASI(0x6200)) /* all 6200 cards have this many DSPs */ phw->num_dsp = 2; /* test that the PLD is programmed */ /* and we can read/write 24bits */ #define PLD_BASE_ADDRESS 0x90000000L /*for ASI6100/6200/8800 */ switch (boot_load_family) { case HPI_ADAPTER_FAMILY_ASI(0x6200): /* ASI6100/6200 has 24bit path to FPGA */ mask = 0xFFFFFF00L; /* ASI5100 uses AX6 code, */ /* but has no PLD r/w register to test */ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev-> subsystem_device) == HPI_ADAPTER_FAMILY_ASI(0x5100)) mask = 0x00000000L; /* ASI5200 uses AX6 code, */ /* but has no PLD r/w register to test */ if (HPI_ADAPTER_FAMILY_ASI(pao->pci.pci_dev-> subsystem_device) == HPI_ADAPTER_FAMILY_ASI(0x5200)) mask = 0x00000000L; break; case HPI_ADAPTER_FAMILY_ASI(0x8800): /* ASI8800 has 16bit path to FPGA */ mask = 0xFFFF0000L; break; } test_data = 0xAAAAAA00L & mask; /* write to 24 bit Debug register (D31-D8) */ hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data); read = hpi_read_word(pdo, PLD_BASE_ADDRESS + 4L) & mask; if (read != test_data) { HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data, read); return HPI6000_ERROR_INIT_PLDTEST1; } test_data = 0x55555500L 
& mask;	/* second PLD pattern (0x555555xx), masked to the bus width
	 * selected above — tail of the PLD r/w test begun on the
	 * previous source line */
		hpi_write_word(pdo, PLD_BASE_ADDRESS + 4L, test_data);
		read = hpi_read_word(pdo, PLD_BASE_ADDRESS + 4L) & mask;
		if (read != test_data) {
			HPI_DEBUG_LOG(ERROR, "PLD %x %x\n", test_data,
				read);
			return HPI6000_ERROR_INIT_PLDTEST2;
		}
		}
	}	/* for numDSP */

	return 0;
}

#define PCI_TIMEOUT 100

/* Load the DSP HPI address register (HPIA), retrying while the PCI2040
 * reports a write error.  Returns 0 on success, 1 on timeout. */
static int hpi_set_address(struct dsp_obj *pdo, u32 address)
{
	u32 timeout = PCI_TIMEOUT;

	do {
		iowrite32(address, pdo->prHPI_address);
	} while (hpi6000_check_PCI2040_error_flag(pdo->pa_parent_adapter,
			H6WRITE)
		&& --timeout);

	if (timeout)
		return 0;
	return 1;
}

/* write one word to the HPI port */
static void hpi_write_word(struct dsp_obj *pdo, u32 address, u32 data)
{
	if (hpi_set_address(pdo, address))
		return;	/* address write failed; word is silently dropped */
	iowrite32(data, pdo->prHPI_data);
}

/* read one word from the HPI port */
static u32 hpi_read_word(struct dsp_obj *pdo, u32 address)
{
	u32 data = 0;

	if (hpi_set_address(pdo, address))
		return 0;	/*? No way to return error */

	/* take care of errata in revB DSP (2.0.1) */
	data = ioread32(pdo->prHPI_data);
	return data;
}

/* write a block of 32bit words to the DSP HPI port using auto-inc mode */
static void hpi_write_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
	u32 length)
{
	/* all but the last word go through the auto-increment register */
	u16 length16 = length - 1;

	if (length == 0)
		return;

	if (hpi_set_address(pdo, address))
		return;

	iowrite32_rep(pdo->prHPI_data_auto_inc, pdata, length16);

	/* take care of errata in revB DSP (2.0.1) */
	/* must end with non auto-inc */
	iowrite32(*(pdata + length - 1), pdo->prHPI_data);
}

/** read a block of 32bit words from the DSP HPI port using auto-inc mode */
static void hpi_read_block(struct dsp_obj *pdo, u32 address, u32 *pdata,
	u32 length)
{
	/* all but the last word come through the auto-increment register */
	u16 length16 = length - 1;

	if (length == 0)
		return;

	if (hpi_set_address(pdo, address))
		return;

	ioread32_rep(pdo->prHPI_data_auto_inc, pdata, length16);

	/* take care of errata in revB DSP (2.0.1) */
	/* must end with non auto-inc */
	*(pdata + length - 1) = ioread32(pdo->prHPI_data);
}

/* Block write to DSP memory in bursts (signature continues on the next
 * source line). */
static u16 hpi6000_dsp_block_write32(struct hpi_adapter_obj *pao, u16
dsp_index, u32 hpi_address, u32 *source, u32 count) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 time_out = PCI_TIMEOUT; int c6711_burst_size = 128; u32 local_hpi_address = hpi_address; int local_count = count; int xfer_size; u32 *pdata = source; while (local_count) { if (local_count > c6711_burst_size) xfer_size = c6711_burst_size; else xfer_size = local_count; time_out = PCI_TIMEOUT; do { hpi_write_block(pdo, local_hpi_address, pdata, xfer_size); } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --time_out); if (!time_out) break; pdata += xfer_size; local_hpi_address += sizeof(u32) * xfer_size; local_count -= xfer_size; } if (time_out) return 0; else return 1; } static u16 hpi6000_dsp_block_read32(struct hpi_adapter_obj *pao, u16 dsp_index, u32 hpi_address, u32 *dest, u32 count) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 time_out = PCI_TIMEOUT; int c6711_burst_size = 16; u32 local_hpi_address = hpi_address; int local_count = count; int xfer_size; u32 *pdata = dest; u32 loop_count = 0; while (local_count) { if (local_count > c6711_burst_size) xfer_size = c6711_burst_size; else xfer_size = local_count; time_out = PCI_TIMEOUT; do { hpi_read_block(pdo, local_hpi_address, pdata, xfer_size); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --time_out); if (!time_out) break; pdata += xfer_size; local_hpi_address += sizeof(u32) * xfer_size; local_count -= xfer_size; loop_count++; } if (time_out) return 0; else return 1; } static short hpi6000_message_response_sequence(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 timeout; u16 ack; u32 address; u32 length; u32 *p_data; u16 error = 0; ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) { pao->dsp_crashed++; return HPI6000_ERROR_MSG_RESP_IDLE_TIMEOUT; } 
pao->dsp_crashed = 0; /* get the message address and size */ if (phw->message_buffer_address_on_dsp == 0) { timeout = TIMEOUT; do { address = hpi_read_word(pdo, HPI_HIF_ADDR(message_buffer_address)); phw->message_buffer_address_on_dsp = address; } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) return HPI6000_ERROR_MSG_GET_ADR; } else address = phw->message_buffer_address_on_dsp; length = phm->size; /* send the message */ p_data = (u32 *)phm; if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data, (u16)length / 4)) return HPI6000_ERROR_MSG_RESP_BLOCKWRITE32; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_GET_RESP)) return HPI6000_ERROR_MSG_RESP_GETRESPCMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_RESP); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_MSG_RESP_GET_RESP_ACK; /* get the response address */ if (phw->response_buffer_address_on_dsp == 0) { timeout = TIMEOUT; do { address = hpi_read_word(pdo, HPI_HIF_ADDR(response_buffer_address)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); phw->response_buffer_address_on_dsp = address; if (!timeout) return HPI6000_ERROR_RESP_GET_ADR; } else address = phw->response_buffer_address_on_dsp; /* read the length of the response back from the DSP */ timeout = TIMEOUT; do { length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) return HPI6000_ERROR_RESP_GET_LEN; if (length > phr->size) return HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL; /* get the response */ p_data = (u32 *)phr; if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data, (u16)length / 4)) return HPI6000_ERROR_MSG_RESP_BLOCKREAD32; /* set i/f back to idle */ if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_MSG_RESP_IDLECMD; hpi6000_send_dsp_interrupt(pdo); error = hpi_validate_response(phm, phr); return error; } /* have to set up the 
below defines to match stuff in the MAP file */ #define MSG_ADDRESS (HPI_HIF_BASE+0x18) #define MSG_LENGTH 11 #define RESP_ADDRESS (HPI_HIF_BASE+0x44) #define RESP_LENGTH 16 #define QUEUE_START (HPI_HIF_BASE+0x88) #define QUEUE_SIZE 0x8000 static short hpi6000_send_data_check_adr(u32 address, u32 length_in_dwords) { /*#define CHECKING // comment this line in to enable checking */ #ifdef CHECKING if (address < (u32)MSG_ADDRESS) return 0; if (address > (u32)(QUEUE_START + QUEUE_SIZE)) return 0; if ((address + (length_in_dwords << 2)) > (u32)(QUEUE_START + QUEUE_SIZE)) return 0; #else (void)address; (void)length_in_dwords; return 1; #endif } static short hpi6000_send_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 data_sent = 0; u16 ack; u32 length, address; u32 *p_data = (u32 *)phm->u.d.u.data.pb_data; u16 time_out = 8; (void)phr; /* round dwDataSize down to nearest 4 bytes */ while ((data_sent < (phm->u.d.u.data.data_size & ~3L)) && --time_out) { ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_SEND_DATA_IDLE_TIMEOUT; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_SEND_DATA)) return HPI6000_ERROR_SEND_DATA_CMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_SEND_DATA); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_SEND_DATA_ACK; do { /* get the address and size */ address = hpi_read_word(pdo, HPI_HIF_ADDR(address)); /* DSP returns number of DWORDS */ length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)); if (!hpi6000_send_data_check_adr(address, length)) return HPI6000_ERROR_SEND_DATA_ADR; /* send the data. break data into 512 DWORD blocks (2K bytes) * and send using block write. 
2Kbytes is the max as this is the * memory window given to the HPI data register by the PCI2040 */ { u32 len = length; u32 blk_len = 512; while (len) { if (len < blk_len) blk_len = len; if (hpi6000_dsp_block_write32(pao, dsp_index, address, p_data, blk_len)) return HPI6000_ERROR_SEND_DATA_WRITE; address += blk_len * 4; p_data += blk_len; len -= blk_len; } } if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_SEND_DATA_IDLECMD; hpi6000_send_dsp_interrupt(pdo); data_sent += length * 4; } if (!time_out) return HPI6000_ERROR_SEND_DATA_TIMEOUT; return 0; } static short hpi6000_get_data(struct hpi_adapter_obj *pao, u16 dsp_index, struct hpi_message *phm, struct hpi_response *phr) { struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 data_got = 0; u16 ack; u32 length, address; u32 *p_data = (u32 *)phm->u.d.u.data.pb_data; (void)phr; /* this parameter not used! */ /* round dwDataSize down to nearest 4 bytes */ while (data_got < (phm->u.d.u.data.data_size & ~3L)) { ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_IDLE); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_GET_DATA_IDLE_TIMEOUT; if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_GET_DATA)) return HPI6000_ERROR_GET_DATA_CMD; hpi6000_send_dsp_interrupt(pdo); ack = hpi6000_wait_dsp_ack(pao, dsp_index, HPI_HIF_GET_DATA); if (ack & HPI_HIF_ERROR_MASK) return HPI6000_ERROR_GET_DATA_ACK; /* get the address and size */ do { address = hpi_read_word(pdo, HPI_HIF_ADDR(address)); length = hpi_read_word(pdo, HPI_HIF_ADDR(length)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ)); /* read the data */ { u32 len = length; u32 blk_len = 512; while (len) { if (len < blk_len) blk_len = len; if (hpi6000_dsp_block_read32(pao, dsp_index, address, p_data, blk_len)) return HPI6000_ERROR_GET_DATA_READ; address += blk_len * 4; p_data += blk_len; len -= blk_len; } } if (hpi6000_send_host_command(pao, dsp_index, HPI_HIF_IDLE)) return HPI6000_ERROR_GET_DATA_IDLECMD; 
hpi6000_send_dsp_interrupt(pdo);

		data_got += length * 4;
	}
	return 0;
}

/* Kick the DSP: raise DSPINT in the HPI control register */
static void hpi6000_send_dsp_interrupt(struct dsp_obj *pdo)
{
	iowrite32(0x00030003, pdo->prHPI_control);	/* DSPINT */
}

/* Post a command word into the DSP host_cmd mailbox, retrying while the
 * PCI2040 flags write errors.  Returns 0 on success, 1 on timeout. */
static short hpi6000_send_host_command(struct hpi_adapter_obj *pao,
	u16 dsp_index, u32 host_cmd)
{
	struct hpi_hw_obj *phw = pao->priv;
	struct dsp_obj *pdo = &phw->ado[dsp_index];
	u32 timeout = TIMEOUT;

	/* set command */
	do {
		hpi_write_word(pdo, HPI_HIF_ADDR(host_cmd), host_cmd);
		/* flush the FIFO */
		hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd));
	} while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE)
		&& --timeout);

	/* reset the interrupt bit */
	iowrite32(0x00040004, pdo->prHPI_control);

	if (timeout)
		return 0;
	else
		return 1;
}

/* if the PCI2040 has recorded an HPI timeout, reset the error and return 1 */
static short hpi6000_check_PCI2040_error_flag(struct hpi_adapter_obj *pao,
	u16 read_or_write)
{
	u32 hPI_error;

	struct hpi_hw_obj *phw = pao->priv;

	/* read the error bits from the PCI2040 */
	hPI_error = ioread32(phw->dw2040_HPICSR + HPI_ERROR_REPORT);
	if (hPI_error) {
		/* reset the error flag */
		iowrite32(0L, phw->dw2040_HPICSR + HPI_ERROR_REPORT);
		phw->pCI2040HPI_error_count++;
		if (read_or_write == 1)
			gw_pci_read_asserts++;	/************* inc global */
		else
			gw_pci_write_asserts++;
		return 1;
	} else
		return 0;
}

/* Wait for the DSP to raise HINT, then poll the dsp_ack mailbox until it
 * equals ack_value.  Returns the ack read, or HPI_HIF_ERROR_MASK on
 * timeout/bad read.  (Body continues on the next source line.) */
static short hpi6000_wait_dsp_ack(struct hpi_adapter_obj *pao, u16 dsp_index,
	u32 ack_value)
{
	struct hpi_hw_obj *phw = pao->priv;
	struct dsp_obj *pdo = &phw->ado[dsp_index];
	u32 ack = 0L;
	u32 timeout;
	u32 hPIC = 0L;

	/* wait for host interrupt to signal ack is ready */
	timeout = TIMEOUT;
	while (--timeout) {
		hPIC = ioread32(pdo->prHPI_control);
		if (hPIC & 0x04)	/* 0x04 = HINT from DSP */
			break;
	}
	if (timeout == 0)
		return HPI_HIF_ERROR_MASK;

	/* wait for dwAckValue */
	timeout = TIMEOUT;
	while (--timeout) {
		/* read the ack mailbox */
		ack = hpi_read_word(pdo, HPI_HIF_ADDR(dsp_ack));
		if (ack == ack_value)
			break;
		if ((ack & HPI_HIF_ERROR_MASK)
			&&
!hpi6000_check_PCI2040_error_flag(pao, H6READ)) break; /*for (i=0;i<1000;i++) */ /* dwPause=i+1; */ } if (ack & HPI_HIF_ERROR_MASK) /* indicates bad read from DSP - typically 0xffffff is read for some reason */ ack = HPI_HIF_ERROR_MASK; if (timeout == 0) ack = HPI_HIF_ERROR_MASK; return (short)ack; } static short hpi6000_update_control_cache(struct hpi_adapter_obj *pao, struct hpi_message *phm) { const u16 dsp_index = 0; struct hpi_hw_obj *phw = pao->priv; struct dsp_obj *pdo = &phw->ado[dsp_index]; u32 timeout; u32 cache_dirty_flag; u16 err; hpios_dsplock_lock(pao); timeout = TIMEOUT; do { cache_dirty_flag = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_is_dirty)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_PARAMS; goto unlock; } if (cache_dirty_flag) { /* read the cached controls */ u32 address; u32 length; timeout = TIMEOUT; if (pdo->control_cache_address_on_dsp == 0) { do { address = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_address)); length = hpi_read_word((struct dsp_obj *)pdo, HPI_HIF_ADDR (control_cache_size_in_bytes)); } while (hpi6000_check_PCI2040_error_flag(pao, H6READ) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_ADDRLEN; goto unlock; } pdo->control_cache_address_on_dsp = address; pdo->control_cache_length_on_dsp = length; } else { address = pdo->control_cache_address_on_dsp; length = pdo->control_cache_length_on_dsp; } if (hpi6000_dsp_block_read32(pao, dsp_index, address, (u32 *)&phw->control_cache[0], length / sizeof(u32))) { err = HPI6000_ERROR_CONTROL_CACHE_READ; goto unlock; } do { hpi_write_word((struct dsp_obj *)pdo, HPI_HIF_ADDR(control_cache_is_dirty), 0); /* flush the FIFO */ hpi_set_address(pdo, HPI_HIF_ADDR(host_cmd)); } while (hpi6000_check_PCI2040_error_flag(pao, H6WRITE) && --timeout); if (!timeout) { err = HPI6000_ERROR_CONTROL_CACHE_FLUSH; goto unlock; } } err = 0; unlock: hpios_dsplock_unlock(pao); 
return err;
}

/** Get dsp index for multi DSP adapters only */
static u16 get_dsp_index(struct hpi_adapter_obj *pao, struct hpi_message *phm)
{
	u16 ret = 0;

	switch (phm->object) {
	case HPI_OBJ_ISTREAM:
		/* instreams with obj_index 0..1 are serviced by DSP 1 */
		if (phm->obj_index < 2)
			ret = 1;
		break;
	case HPI_OBJ_PROFILE:
		ret = phm->obj_index;
		break;
	default:
		break;
	}
	return ret;
}

/** Complete transaction with DSP

Send message, get response, send or get stream data if any.
*/
static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
	struct hpi_response *phr)
{
	u16 error = 0;
	u16 dsp_index = 0;
	struct hpi_hw_obj *phw = pao->priv;
	u16 num_dsp = phw->num_dsp;

	if (num_dsp < 2)
		dsp_index = 0;
	else {
		dsp_index = get_dsp_index(pao, phm);

		/* is this checked on the DSP anyway? */
		if ((phm->function == HPI_ISTREAM_GROUP_ADD)
			|| (phm->function == HPI_OSTREAM_GROUP_ADD)) {
			/* the stream being added must live on the same
			 * DSP as the group owner */
			struct hpi_message hm;
			u16 add_index;
			hm.obj_index = phm->u.d.u.stream.stream_index;
			hm.object = phm->u.d.u.stream.object_type;
			add_index = get_dsp_index(pao, &hm);
			if (add_index != dsp_index) {
				phr->error = HPI_ERROR_NO_INTERDSP_GROUPS;
				return;
			}
		}
	}

	hpios_dsplock_lock(pao);
	error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr);

	if (error)	/* something failed in the HPI/DSP interface */
		goto err;

	if (phr->error)	/* something failed in the DSP */
		goto out;

	/* stream reads/writes need a follow-up data phase */
	switch (phm->function) {
	case HPI_OSTREAM_WRITE:
	case HPI_ISTREAM_ANC_WRITE:
		error = hpi6000_send_data(pao, dsp_index, phm, phr);
		break;
	case HPI_ISTREAM_READ:
	case HPI_OSTREAM_ANC_READ:
		error = hpi6000_get_data(pao, dsp_index, phm, phr);
		break;
	case HPI_ADAPTER_GET_ASSERT:
		phr->u.ax.assert.dsp_index = 0;	/* dsp 0 default */
		if (num_dsp == 2) {
			if (!phr->u.ax.assert.count) {
				/* no assert from dsp 0, check dsp 1 */
				error = hpi6000_message_response_sequence(pao,
					1, phm, phr);
				phr->u.ax.assert.dsp_index = 1;
			}
		}
	}

err:
	if (error) {
		/* interface-level errors are reported as a generic DSP
		 * communication failure with the detail in specific_error */
		if (error >= HPI_ERROR_BACKEND_BASE) {
			phr->error = HPI_ERROR_DSP_COMMUNICATION;
			phr->specific_error = error;
		} else {
			phr->error = error;
		}

		/* just the header of the response is valid */
		phr->size = sizeof(struct hpi_response_header);
	}
out:
	hpios_dsplock_unlock(pao);
	return;
}
gpl-2.0
embeddedarm/linux-3.0.35-imx6
arch/powerpc/sysdev/qe_lib/gpio.c
3012
8736
/* * QUICC Engine GPIOs * * Copyright (c) MontaVista Software, Inc. 2008. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/slab.h> #include <asm/qe.h> struct qe_gpio_chip { struct of_mm_gpio_chip mm_gc; spinlock_t lock; unsigned long pin_flags[QE_PIO_PINS]; #define QE_PIN_REQUESTED 0 /* shadowed data register to clear/set bits safely */ u32 cpdata; /* saved_regs used to restore dedicated functions */ struct qe_pio_regs saved_regs; }; static inline struct qe_gpio_chip * to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc) { return container_of(mm_gc, struct qe_gpio_chip, mm_gc); } static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) { struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; qe_gc->cpdata = in_be32(&regs->cpdata); qe_gc->saved_regs.cpdata = qe_gc->cpdata; qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1); qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2); qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1); qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2); qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr); } static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct qe_pio_regs __iomem *regs = mm_gc->regs; u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); return in_be32(&regs->cpdata) & pin_mask; } static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); 
struct qe_pio_regs __iomem *regs = mm_gc->regs; unsigned long flags; u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); spin_lock_irqsave(&qe_gc->lock, flags); if (val) qe_gc->cpdata |= pin_mask; else qe_gc->cpdata &= ~pin_mask; out_be32(&regs->cpdata, qe_gc->cpdata); spin_unlock_irqrestore(&qe_gc->lock, flags); } static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); unsigned long flags; spin_lock_irqsave(&qe_gc->lock, flags); __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0); spin_unlock_irqrestore(&qe_gc->lock, flags); return 0; } static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); unsigned long flags; qe_gpio_set(gc, gpio, val); spin_lock_irqsave(&qe_gc->lock, flags); __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0); spin_unlock_irqrestore(&qe_gc->lock, flags); return 0; } struct qe_pin { /* * The qe_gpio_chip name is unfortunate, we should change that to * something like qe_pio_controller. Someday. */ struct qe_gpio_chip *controller; int num; }; /** * qe_pin_request - Request a QE pin * @np: device node to get a pin from * @index: index of a pin in the device tree * Context: non-atomic * * This function return qe_pin so that you could use it with the rest of * the QE Pin Multiplexing API. 
*/ struct qe_pin *qe_pin_request(struct device_node *np, int index) { struct qe_pin *qe_pin; struct device_node *gpio_np; struct gpio_chip *gc; struct of_mm_gpio_chip *mm_gc; struct qe_gpio_chip *qe_gc; int err; int size; const void *gpio_spec; const u32 *gpio_cells; unsigned long flags; qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); if (!qe_pin) { pr_debug("%s: can't allocate memory\n", __func__); return ERR_PTR(-ENOMEM); } err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, &gpio_np, &gpio_spec); if (err) { pr_debug("%s: can't parse gpios property\n", __func__); goto err0; } if (!of_device_is_compatible(gpio_np, "fsl,mpc8323-qe-pario-bank")) { pr_debug("%s: tried to get a non-qe pin\n", __func__); err = -EINVAL; goto err1; } gc = of_node_to_gpiochip(gpio_np); if (!gc) { pr_debug("%s: gpio controller %s isn't registered\n", np->full_name, gpio_np->full_name); err = -ENODEV; goto err1; } gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size); if (!gpio_cells || size != sizeof(*gpio_cells) || *gpio_cells != gc->of_gpio_n_cells) { pr_debug("%s: wrong #gpio-cells for %s\n", np->full_name, gpio_np->full_name); err = -EINVAL; goto err1; } err = gc->of_xlate(gc, np, gpio_spec, NULL); if (err < 0) goto err1; mm_gc = to_of_mm_gpio_chip(gc); qe_gc = to_qe_gpio_chip(mm_gc); spin_lock_irqsave(&qe_gc->lock, flags); if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { qe_pin->controller = qe_gc; qe_pin->num = err; err = 0; } else { err = -EBUSY; } spin_unlock_irqrestore(&qe_gc->lock, flags); if (!err) return qe_pin; err1: of_node_put(gpio_np); err0: kfree(qe_pin); pr_debug("%s failed with status %d\n", __func__, err); return ERR_PTR(err); } EXPORT_SYMBOL(qe_pin_request); /** * qe_pin_free - Free a pin * @qe_pin: pointer to the qe_pin structure * Context: any * * This function frees the qe_pin structure and makes a pin available * for further qe_pin_request() calls. 
*/ void qe_pin_free(struct qe_pin *qe_pin) { struct qe_gpio_chip *qe_gc = qe_pin->controller; unsigned long flags; const int pin = qe_pin->num; spin_lock_irqsave(&qe_gc->lock, flags); test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]); spin_unlock_irqrestore(&qe_gc->lock, flags); kfree(qe_pin); } EXPORT_SYMBOL(qe_pin_free); /** * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode * @qe_pin: pointer to the qe_pin structure * Context: any * * This function resets a pin to a dedicated peripheral function that * has been set up by the firmware. */ void qe_pin_set_dedicated(struct qe_pin *qe_pin) { struct qe_gpio_chip *qe_gc = qe_pin->controller; struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; struct qe_pio_regs *sregs = &qe_gc->saved_regs; int pin = qe_pin->num; u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1)); u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2); bool second_reg = pin > (QE_PIO_PINS / 2) - 1; unsigned long flags; spin_lock_irqsave(&qe_gc->lock, flags); if (second_reg) { clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2); clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2); } else { clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2); clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2); } if (sregs->cpdata & mask1) qe_gc->cpdata |= mask1; else qe_gc->cpdata &= ~mask1; out_be32(&regs->cpdata, qe_gc->cpdata); clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1); spin_unlock_irqrestore(&qe_gc->lock, flags); } EXPORT_SYMBOL(qe_pin_set_dedicated); /** * qe_pin_set_gpio - Set a pin to the GPIO mode * @qe_pin: pointer to the qe_pin structure * Context: any * * This function sets a pin to the GPIO mode. 
*/ void qe_pin_set_gpio(struct qe_pin *qe_pin) { struct qe_gpio_chip *qe_gc = qe_pin->controller; struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; unsigned long flags; spin_lock_irqsave(&qe_gc->lock, flags); /* Let's make it input by default, GPIO API is able to change that. */ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0); spin_unlock_irqrestore(&qe_gc->lock, flags); } EXPORT_SYMBOL(qe_pin_set_gpio); static int __init qe_add_gpiochips(void) { struct device_node *np; for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") { int ret; struct qe_gpio_chip *qe_gc; struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL); if (!qe_gc) { ret = -ENOMEM; goto err; } spin_lock_init(&qe_gc->lock); mm_gc = &qe_gc->mm_gc; gc = &mm_gc->gc; mm_gc->save_regs = qe_gpio_save_regs; gc->ngpio = QE_PIO_PINS; gc->direction_input = qe_gpio_dir_in; gc->direction_output = qe_gpio_dir_out; gc->get = qe_gpio_get; gc->set = qe_gpio_set; ret = of_mm_gpiochip_add(np, mm_gc); if (ret) goto err; continue; err: pr_err("%s: registration failed with status %d\n", np->full_name, ret); kfree(qe_gc); /* try others anyway */ } return 0; } arch_initcall(qe_add_gpiochips);
gpl-2.0
chirayudesai/spawncamping-nemesis
net/netfilter/nf_conntrack_h323_main.c
4036
53239
/* * H.323 connection tracking helper * * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> * * This source code is licensed under General Public License version 2. * * Based on the 'brute force' H.323 connection tracking module by * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * For more information, please see http://nath323.sourceforge.net/ */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/slab.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <net/route.h> #include <net/ip6_route.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_h323.h> /* Parameters */ static unsigned int default_rrq_ttl __read_mostly = 300; module_param(default_rrq_ttl, uint, 0600); MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ"); static int gkrouted_only __read_mostly = 1; module_param(gkrouted_only, int, 0600); MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); static bool callforward_filter __read_mostly = true; module_param(callforward_filter, bool, 0600); MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " "if both endpoints are on different sides " "(determined by routing information)"); /* Hooks for NAT */ int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned char **data, int dataoff, H245_TransportAddress *taddr, union nf_inet_addr *addr, __be16 port) __read_mostly; int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned char **data, int dataoff, TransportAddress *taddr, union nf_inet_addr *addr, __be16 port) __read_mostly; int 
(*set_sig_addr_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, TransportAddress *taddr, int count) __read_mostly; int (*set_ras_addr_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, TransportAddress *taddr, int count) __read_mostly; int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, H245_TransportAddress *taddr, __be16 port, __be16 rtp_port, struct nf_conntrack_expect *rtp_exp, struct nf_conntrack_expect *rtcp_exp) __read_mostly; int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, H245_TransportAddress *taddr, __be16 port, struct nf_conntrack_expect *exp) __read_mostly; int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, TransportAddress *taddr, __be16 port, struct nf_conntrack_expect *exp) __read_mostly; int (*nat_callforwarding_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, TransportAddress *taddr, __be16 port, struct nf_conntrack_expect *exp) __read_mostly; int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, TransportAddress *taddr, int idx, __be16 port, struct nf_conntrack_expect *exp) __read_mostly; static DEFINE_SPINLOCK(nf_h323_lock); static char *h323_buffer; static struct nf_conntrack_helper nf_conntrack_helper_h245; static struct nf_conntrack_helper nf_conntrack_helper_q931[]; static struct nf_conntrack_helper nf_conntrack_helper_ras[]; /****************************************************************************/ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int *datalen, int *dataoff) { struct 
nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; int dir = CTINFO2DIR(ctinfo); const struct tcphdr *th; struct tcphdr _tcph; int tcpdatalen; int tcpdataoff; unsigned char *tpkt; int tpktlen; int tpktoff; /* Get TCP header */ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); if (th == NULL) return 0; /* Get TCP data offset */ tcpdataoff = protoff + th->doff * 4; /* Get TCP data length */ tcpdatalen = skb->len - tcpdataoff; if (tcpdatalen <= 0) /* No TCP data */ goto clear_out; if (*data == NULL) { /* first TPKT */ /* Get first TPKT pointer */ tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen, h323_buffer); BUG_ON(tpkt == NULL); /* Validate TPKT identifier */ if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) { /* Netmeeting sends TPKT header and data separately */ if (info->tpkt_len[dir] > 0) { pr_debug("nf_ct_h323: previous packet " "indicated separate TPKT data of %hu " "bytes\n", info->tpkt_len[dir]); if (info->tpkt_len[dir] <= tcpdatalen) { /* Yes, there was a TPKT header * received */ *data = tpkt; *datalen = info->tpkt_len[dir]; *dataoff = 0; goto out; } /* Fragmented TPKT */ pr_debug("nf_ct_h323: fragmented TPKT\n"); goto clear_out; } /* It is not even a TPKT */ return 0; } tpktoff = 0; } else { /* Next TPKT */ tpktoff = *dataoff + *datalen; tcpdatalen -= tpktoff; if (tcpdatalen <= 4) /* No more TPKT */ goto clear_out; tpkt = *data + *datalen; /* Validate TPKT identifier */ if (tpkt[0] != 0x03 || tpkt[1] != 0) goto clear_out; } /* Validate TPKT length */ tpktlen = tpkt[2] * 256 + tpkt[3]; if (tpktlen < 4) goto clear_out; if (tpktlen > tcpdatalen) { if (tcpdatalen == 4) { /* Separate TPKT header */ /* Netmeeting sends TPKT header and data separately */ pr_debug("nf_ct_h323: separate TPKT header indicates " "there will be TPKT data of %hu bytes\n", tpktlen - 4); info->tpkt_len[dir] = tpktlen - 4; return 0; } pr_debug("nf_ct_h323: incomplete TPKT (fragmented?)\n"); goto clear_out; } /* This is the encapsulated data */ *data = tpkt 
+ 4; *datalen = tpktlen - 4; *dataoff = tpktoff + 4; out: /* Clear TPKT length */ info->tpkt_len[dir] = 0; return 1; clear_out: info->tpkt_len[dir] = 0; return 0; } /****************************************************************************/ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, H245_TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port) { const unsigned char *p; int len; if (taddr->choice != eH245_TransportAddress_unicastAddress) return 0; switch (taddr->unicastAddress.choice) { case eUnicastAddress_iPAddress: if (nf_ct_l3num(ct) != AF_INET) return 0; p = data + taddr->unicastAddress.iPAddress.network; len = 4; break; case eUnicastAddress_iP6Address: if (nf_ct_l3num(ct) != AF_INET6) return 0; p = data + taddr->unicastAddress.iP6Address.network; len = 16; break; default: return 0; } memcpy(addr, p, len); memset((void *)addr + len, 0, sizeof(*addr) - len); memcpy(port, p + len, sizeof(__be16)); return 1; } /****************************************************************************/ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, H245_TransportAddress *taddr) { int dir = CTINFO2DIR(ctinfo); int ret = 0; __be16 port; __be16 rtp_port, rtcp_port; union nf_inet_addr addr; struct nf_conntrack_expect *rtp_exp; struct nf_conntrack_expect *rtcp_exp; typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp; /* Read RTP or RTCP address */ if (!get_h245_addr(ct, *data, taddr, &addr, &port) || memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || port == 0) return 0; /* RTP port is even */ port &= htons(~1); rtp_port = port; rtcp_port = htons(ntohs(port) + 1); /* Create expect for RTP */ if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) return -1; nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[!dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_UDP, NULL, &rtp_port); /* Create expect for RTCP */ if 
((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) { nf_ct_expect_put(rtp_exp); return -1; } nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[!dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_UDP, NULL, &rtcp_port); if (memcmp(&ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(ct->tuplehash[dir].tuple.src.u3)) && (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && ct->status & IPS_NAT_MASK) { /* NAT needed */ ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff, taddr, port, rtp_port, rtp_exp, rtcp_exp); } else { /* Conntrack only */ if (nf_ct_expect_related(rtp_exp) == 0) { if (nf_ct_expect_related(rtcp_exp) == 0) { pr_debug("nf_ct_h323: expect RTP "); nf_ct_dump_tuple(&rtp_exp->tuple); pr_debug("nf_ct_h323: expect RTCP "); nf_ct_dump_tuple(&rtcp_exp->tuple); } else { nf_ct_unexpect_related(rtp_exp); ret = -1; } } else ret = -1; } nf_ct_expect_put(rtp_exp); nf_ct_expect_put(rtcp_exp); return ret; } /****************************************************************************/ static int expect_t120(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, H245_TransportAddress *taddr) { int dir = CTINFO2DIR(ctinfo); int ret = 0; __be16 port; union nf_inet_addr addr; struct nf_conntrack_expect *exp; typeof(nat_t120_hook) nat_t120; /* Read T.120 address */ if (!get_h245_addr(ct, *data, taddr, &addr, &port) || memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || port == 0) return 0; /* Create expect for T.120 connections */ if ((exp = nf_ct_expect_alloc(ct)) == NULL) return -1; nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[!dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_TCP, NULL, &port); exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ if (memcmp(&ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(ct->tuplehash[dir].tuple.src.u3)) && 
(nat_t120 = rcu_dereference(nat_t120_hook)) && ct->status & IPS_NAT_MASK) { /* NAT needed */ ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ if (nf_ct_expect_related(exp) == 0) { pr_debug("nf_ct_h323: expect T.120 "); nf_ct_dump_tuple(&exp->tuple); } else ret = -1; } nf_ct_expect_put(exp); return ret; } /****************************************************************************/ static int process_h245_channel(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, H2250LogicalChannelParameters *channel) { int ret; if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { /* RTP */ ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, &channel->mediaChannel); if (ret < 0) return -1; } if (channel-> options & eH2250LogicalChannelParameters_mediaControlChannel) { /* RTCP */ ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, &channel->mediaControlChannel); if (ret < 0) return -1; } return 0; } /****************************************************************************/ static int process_olc(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, OpenLogicalChannel *olc) { int ret; pr_debug("nf_ct_h323: OpenLogicalChannel\n"); if (olc->forwardLogicalChannelParameters.multiplexParameters.choice == eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters) { ret = process_h245_channel(skb, ct, ctinfo, data, dataoff, &olc-> forwardLogicalChannelParameters. multiplexParameters. h2250LogicalChannelParameters); if (ret < 0) return -1; } if ((olc->options & eOpenLogicalChannel_reverseLogicalChannelParameters) && (olc->reverseLogicalChannelParameters.options & eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters) && (olc->reverseLogicalChannelParameters.multiplexParameters. 
choice == eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) { ret = process_h245_channel(skb, ct, ctinfo, data, dataoff, &olc-> reverseLogicalChannelParameters. multiplexParameters. h2250LogicalChannelParameters); if (ret < 0) return -1; } if ((olc->options & eOpenLogicalChannel_separateStack) && olc->forwardLogicalChannelParameters.dataType.choice == eDataType_data && olc->forwardLogicalChannelParameters.dataType.data.application. choice == eDataApplicationCapability_application_t120 && olc->forwardLogicalChannelParameters.dataType.data.application. t120.choice == eDataProtocolCapability_separateLANStack && olc->separateStack.networkAddress.choice == eNetworkAccessParameters_networkAddress_localAreaAddress) { ret = expect_t120(skb, ct, ctinfo, data, dataoff, &olc->separateStack.networkAddress. localAreaAddress); if (ret < 0) return -1; } return 0; } /****************************************************************************/ static int process_olca(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, OpenLogicalChannelAck *olca) { H2250LogicalChannelAckParameters *ack; int ret; pr_debug("nf_ct_h323: OpenLogicalChannelAck\n"); if ((olca->options & eOpenLogicalChannelAck_reverseLogicalChannelParameters) && (olca->reverseLogicalChannelParameters.options & eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters) && (olca->reverseLogicalChannelParameters.multiplexParameters. choice == eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) { ret = process_h245_channel(skb, ct, ctinfo, data, dataoff, &olca-> reverseLogicalChannelParameters. multiplexParameters. 
h2250LogicalChannelParameters); if (ret < 0) return -1; } if ((olca->options & eOpenLogicalChannelAck_forwardMultiplexAckParameters) && (olca->forwardMultiplexAckParameters.choice == eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters)) { ack = &olca->forwardMultiplexAckParameters. h2250LogicalChannelAckParameters; if (ack->options & eH2250LogicalChannelAckParameters_mediaChannel) { /* RTP */ ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, &ack->mediaChannel); if (ret < 0) return -1; } if (ack->options & eH2250LogicalChannelAckParameters_mediaControlChannel) { /* RTCP */ ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, &ack->mediaControlChannel); if (ret < 0) return -1; } } if ((olca->options & eOpenLogicalChannelAck_separateStack) && olca->separateStack.networkAddress.choice == eNetworkAccessParameters_networkAddress_localAreaAddress) { ret = expect_t120(skb, ct, ctinfo, data, dataoff, &olca->separateStack.networkAddress. localAreaAddress); if (ret < 0) return -1; } return 0; } /****************************************************************************/ static int process_h245(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, MultimediaSystemControlMessage *mscm) { switch (mscm->choice) { case eMultimediaSystemControlMessage_request: if (mscm->request.choice == eRequestMessage_openLogicalChannel) { return process_olc(skb, ct, ctinfo, data, dataoff, &mscm->request.openLogicalChannel); } pr_debug("nf_ct_h323: H.245 Request %d\n", mscm->request.choice); break; case eMultimediaSystemControlMessage_response: if (mscm->response.choice == eResponseMessage_openLogicalChannelAck) { return process_olca(skb, ct, ctinfo, data, dataoff, &mscm->response. 
openLogicalChannelAck); } pr_debug("nf_ct_h323: H.245 Response %d\n", mscm->response.choice); break; default: pr_debug("nf_ct_h323: H.245 signal %d\n", mscm->choice); break; } return 0; } /****************************************************************************/ static int h245_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { static MultimediaSystemControlMessage mscm; unsigned char *data = NULL; int datalen; int dataoff; int ret; /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; pr_debug("nf_ct_h245: skblen = %u\n", skb->len); spin_lock_bh(&nf_h323_lock); /* Process each TPKT */ while (get_tpkt_data(skb, protoff, ct, ctinfo, &data, &datalen, &dataoff)) { pr_debug("nf_ct_h245: TPKT len=%d ", datalen); nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); /* Decode H.245 signal */ ret = DecodeMultimediaSystemControlMessage(data, datalen, &mscm); if (ret < 0) { pr_debug("nf_ct_h245: decoding error: %s\n", ret == H323_ERROR_BOUND ? 
"out of bound" : "out of range"); /* We don't drop when decoding error */ break; } /* Process H.245 signal */ if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0) goto drop; } spin_unlock_bh(&nf_h323_lock); return NF_ACCEPT; drop: spin_unlock_bh(&nf_h323_lock); if (net_ratelimit()) pr_info("nf_ct_h245: packet dropped\n"); return NF_DROP; } /****************************************************************************/ static const struct nf_conntrack_expect_policy h245_exp_policy = { .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */, .timeout = 240, }; static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { .name = "H.245", .me = THIS_MODULE, .tuple.src.l3num = AF_UNSPEC, .tuple.dst.protonum = IPPROTO_UDP, .help = h245_help, .expect_policy = &h245_exp_policy, }; /****************************************************************************/ int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port) { const unsigned char *p; int len; switch (taddr->choice) { case eTransportAddress_ipAddress: if (nf_ct_l3num(ct) != AF_INET) return 0; p = data + taddr->ipAddress.ip; len = 4; break; case eTransportAddress_ip6Address: if (nf_ct_l3num(ct) != AF_INET6) return 0; p = data + taddr->ip6Address.ip; len = 16; break; default: return 0; } memcpy(addr, p, len); memset((void *)addr + len, 0, sizeof(*addr) - len); memcpy(port, p + len, sizeof(__be16)); return 1; } /****************************************************************************/ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, TransportAddress *taddr) { int dir = CTINFO2DIR(ctinfo); int ret = 0; __be16 port; union nf_inet_addr addr; struct nf_conntrack_expect *exp; typeof(nat_h245_hook) nat_h245; /* Read h245Address */ if (!get_h225_addr(ct, *data, taddr, &addr, &port) || memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || port 
== 0) return 0; /* Create expect for h245 connection */ if ((exp = nf_ct_expect_alloc(ct)) == NULL) return -1; nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[!dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_TCP, NULL, &port); exp->helper = &nf_conntrack_helper_h245; if (memcmp(&ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(ct->tuplehash[dir].tuple.src.u3)) && (nat_h245 = rcu_dereference(nat_h245_hook)) && ct->status & IPS_NAT_MASK) { /* NAT needed */ ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ if (nf_ct_expect_related(exp) == 0) { pr_debug("nf_ct_q931: expect H.245 "); nf_ct_dump_tuple(&exp->tuple); } else ret = -1; } nf_ct_expect_put(exp); return ret; } /* If the calling party is on the same side of the forward-to party, * we don't need to track the second call */ static int callforward_do_filter(const union nf_inet_addr *src, const union nf_inet_addr *dst, u_int8_t family) { const struct nf_afinfo *afinfo; int ret = 0; /* rcu_read_lock()ed by nf_hook_slow() */ afinfo = nf_get_afinfo(family); if (!afinfo) return 0; switch (family) { case AF_INET: { struct flowi4 fl1, fl2; struct rtable *rt1, *rt2; memset(&fl1, 0, sizeof(fl1)); fl1.daddr = src->ip; memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->ip; if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, flowi4_to_flowi(&fl1), false)) { if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, flowi4_to_flowi(&fl2), false)) { if (rt1->rt_gateway == rt2->rt_gateway && rt1->dst.dev == rt2->dst.dev) ret = 1; dst_release(&rt2->dst); } dst_release(&rt1->dst); } break; } #if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) case AF_INET6: { struct flowi6 fl1, fl2; struct rt6_info *rt1, *rt2; memset(&fl1, 0, sizeof(fl1)); fl1.daddr = src->in6; memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->in6; if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, flowi6_to_flowi(&fl1), false)) { if 
(!afinfo->route(&init_net, (struct dst_entry **)&rt2, flowi6_to_flowi(&fl2), false)) { if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, sizeof(rt1->rt6i_gateway)) && rt1->dst.dev == rt2->dst.dev) ret = 1; dst_release(&rt2->dst); } dst_release(&rt1->dst); } break; } #endif } return ret; } /****************************************************************************/ static int expect_callforwarding(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, TransportAddress *taddr) { int dir = CTINFO2DIR(ctinfo); int ret = 0; __be16 port; union nf_inet_addr addr; struct nf_conntrack_expect *exp; typeof(nat_callforwarding_hook) nat_callforwarding; /* Read alternativeAddress */ if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0) return 0; /* If the calling party is on the same side of the forward-to party, * we don't need to track the second call */ if (callforward_filter && callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, nf_ct_l3num(ct))) { pr_debug("nf_ct_q931: Call Forwarding not tracked\n"); return 0; } /* Create expect for the second call leg */ if ((exp = nf_ct_expect_alloc(ct)) == NULL) return -1; nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[!dir].tuple.src.u3, &addr, IPPROTO_TCP, NULL, &port); exp->helper = nf_conntrack_helper_q931; if (memcmp(&ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(ct->tuplehash[dir].tuple.src.u3)) && (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) && ct->status & IPS_NAT_MASK) { /* Need NAT */ ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff, taddr, port, exp); } else { /* Conntrack only */ if (nf_ct_expect_related(exp) == 0) { pr_debug("nf_ct_q931: expect Call Forwarding "); nf_ct_dump_tuple(&exp->tuple); } else ret = -1; } nf_ct_expect_put(exp); return ret; } /****************************************************************************/ static int 
process_setup(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int dataoff, Setup_UUIE *setup) { int dir = CTINFO2DIR(ctinfo); int ret; int i; __be16 port; union nf_inet_addr addr; typeof(set_h225_addr_hook) set_h225_addr; pr_debug("nf_ct_q931: Setup\n"); if (setup->options & eSetup_UUIE_h245Address) { ret = expect_h245(skb, ct, ctinfo, data, dataoff, &setup->h245Address); if (ret < 0) return -1; } set_h225_addr = rcu_dereference(set_h225_addr_hook); if ((setup->options & eSetup_UUIE_destCallSignalAddress) && (set_h225_addr) && ct->status & IPS_NAT_MASK && get_h225_addr(ct, *data, &setup->destCallSignalAddress, &addr, &port) && memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n", &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3, ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); ret = set_h225_addr(skb, data, dataoff, &setup->destCallSignalAddress, &ct->tuplehash[!dir].tuple.src.u3, ct->tuplehash[!dir].tuple.src.u.tcp.port); if (ret < 0) return -1; } if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && (set_h225_addr) && ct->status & IPS_NAT_MASK && get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, &addr, &port) && memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n", &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3, ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); ret = set_h225_addr(skb, data, dataoff, &setup->sourceCallSignalAddress, &ct->tuplehash[!dir].tuple.dst.u3, ct->tuplehash[!dir].tuple.dst.u.tcp.port); if (ret < 0) return -1; } if (setup->options & eSetup_UUIE_fastStart) { for (i = 0; i < setup->fastStart.count; i++) { ret = process_olc(skb, ct, ctinfo, data, dataoff, &setup->fastStart.item[i]); if (ret < 0) return -1; } } return 0; } /****************************************************************************/ 
/* Handle a Q.931 CallProceeding UUIE: expect the optional H.245 channel
 * and walk any fastStart OLC elements.  Returns 0 on success, -1 on any
 * sub-handler failure. */
static int process_callproceeding(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned char **data, int dataoff,
				  CallProceeding_UUIE *callproc)
{
	int idx;

	pr_debug("nf_ct_q931: CallProceeding\n");

	if ((callproc->options & eCallProceeding_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&callproc->h245Address) < 0)
		return -1;

	if (!(callproc->options & eCallProceeding_UUIE_fastStart))
		return 0;

	for (idx = 0; idx < callproc->fastStart.count; idx++)
		if (process_olc(skb, ct, ctinfo, data, dataoff,
				&callproc->fastStart.item[idx]) < 0)
			return -1;

	return 0;
}

/****************************************************************************/
/* Handle a Q.931 Connect UUIE: same structure as CallProceeding. */
static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned char **data, int dataoff,
			   Connect_UUIE *connect)
{
	int idx;

	pr_debug("nf_ct_q931: Connect\n");

	if ((connect->options & eConnect_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&connect->h245Address) < 0)
		return -1;

	if (!(connect->options & eConnect_UUIE_fastStart))
		return 0;

	for (idx = 0; idx < connect->fastStart.count; idx++)
		if (process_olc(skb, ct, ctinfo, data, dataoff,
				&connect->fastStart.item[idx]) < 0)
			return -1;

	return 0;
}

/****************************************************************************/
/* Handle a Q.931 Alerting UUIE: same structure as CallProceeding. */
static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Alerting_UUIE *alert)
{
	int idx;

	pr_debug("nf_ct_q931: Alerting\n");

	if ((alert->options & eAlerting_UUIE_h245Address) &&
	    expect_h245(skb, ct, ctinfo, data, dataoff,
			&alert->h245Address) < 0)
		return -1;

	if (!(alert->options & eAlerting_UUIE_fastStart))
		return 0;

	for (idx = 0; idx < alert->fastStart.count; idx++)
		if (process_olc(skb, ct, ctinfo, data, dataoff,
				&alert->fastStart.item[idx]) < 0)
			return -1;

	return 0;
}
/****************************************************************************/
/* Handle a Q.931 Facility UUIE.  A callForwarded reason routes to the
 * call-forwarding expectation path; otherwise treat like the other UUIEs. */
static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Facility_UUIE *facility)
{
	int ret;
	int i;

	pr_debug("nf_ct_q931: Facility\n");

	if (facility->reason.choice == eFacilityReason_callForwarded) {
		if (facility->options & eFacility_UUIE_alternativeAddress)
			return expect_callforwarding(skb, ct, ctinfo, data,
						     dataoff,
						     &facility->alternativeAddress);
		return 0;
	}

	if (facility->options & eFacility_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &facility->h245Address);
		if (ret < 0)
			return -1;
	}

	if (facility->options & eFacility_UUIE_fastStart) {
		for (i = 0; i < facility->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &facility->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}

/****************************************************************************/
/* Handle a Q.931 Progress UUIE: optional H.245 expect plus fastStart. */
static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    unsigned char **data, int dataoff,
			    Progress_UUIE *progress)
{
	int ret;
	int i;

	pr_debug("nf_ct_q931: Progress\n");

	if (progress->options & eProgress_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &progress->h245Address);
		if (ret < 0)
			return -1;
	}

	if (progress->options & eProgress_UUIE_fastStart) {
		for (i = 0; i < progress->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &progress->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}

/****************************************************************************/
/* Dispatch a decoded Q.931 message to the per-UUIE handler, then walk any
 * tunneled H.245 control elements. */
static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff, Q931 *q931)
{
	H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
	int i;
	int ret = 0;

	switch (pdu->h323_message_body.choice) {
	case eH323_UU_PDU_h323_message_body_setup:
		ret = process_setup(skb, ct, ctinfo, data, dataoff,
				    &pdu->h323_message_body.setup);
		break;
	case eH323_UU_PDU_h323_message_body_callProceeding:
		ret = process_callproceeding(skb, ct, ctinfo, data, dataoff,
					     &pdu->h323_message_body.callProceeding);
		break;
	case eH323_UU_PDU_h323_message_body_connect:
		ret = process_connect(skb, ct, ctinfo, data, dataoff,
				      &pdu->h323_message_body.connect);
		break;
	case eH323_UU_PDU_h323_message_body_alerting:
		ret = process_alerting(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.alerting);
		break;
	case eH323_UU_PDU_h323_message_body_facility:
		ret = process_facility(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.facility);
		break;
	case eH323_UU_PDU_h323_message_body_progress:
		ret = process_progress(skb, ct, ctinfo, data, dataoff,
				       &pdu->h323_message_body.progress);
		break;
	default:
		pr_debug("nf_ct_q931: Q.931 signal %d\n",
			 pdu->h323_message_body.choice);
		break;
	}

	if (ret < 0)
		return -1;

	if (pdu->options & eH323_UU_PDU_h245Control) {
		for (i = 0; i < pdu->h245Control.count; i++) {
			ret = process_h245(skb, ct, ctinfo, data, dataoff,
					   &pdu->h245Control.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}

/****************************************************************************/
/* Conntrack helper entry for the Q.931 signalling TCP connection.
 * Decodes each TPKT-framed Q.931 message under nf_h323_lock (the static
 * decode buffer `q931` is shared, hence the lock). */
static int q931_help(struct sk_buff *skb, unsigned int protoff,
		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static Q931 q931;	/* shared decode buffer, protected by nf_h323_lock */
	unsigned char *data = NULL;
	int datalen;
	int dataoff;
	int ret;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;

	pr_debug("nf_ct_q931: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Process each TPKT */
	while (get_tpkt_data(skb, protoff, ct, ctinfo,
			     &data, &datalen, &dataoff)) {
		pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
		nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

		/* Decode Q.931 signal */
		ret = DecodeQ931(data, datalen, &q931);
		if (ret < 0) {
			pr_debug("nf_ct_q931: decoding error: %s\n",
				 ret == H323_ERROR_BOUND ?
				 "out of bound" : "out of range");
			/* We don't drop when decoding error */
			break;
		}

		/* Process Q.931 signal */
		if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0)
			goto drop;
	}

	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_q931: packet dropped\n");
	return NF_DROP;
}

/****************************************************************************/
static const struct nf_conntrack_expect_policy q931_exp_policy = {
	/* T.120 and H.245 */
	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
	.timeout		= 240,
};

/* Q.931 helpers for IPv4 and IPv6; indexed elsewhere as a pair. */
static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
	{
		.name			= "Q.931",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.tcp.port	= cpu_to_be16(Q931_PORT),
		.tuple.dst.protonum	= IPPROTO_TCP,
		.help			= q931_help,
		.expect_policy		= &q931_exp_policy,
	},
};

/****************************************************************************/
/* Return a pointer (into h323_buffer) to the UDP payload, or NULL if the
 * packet has no payload beyond the UDP header. */
static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
				   int *datalen)
{
	const struct udphdr *uh;
	struct udphdr _uh;
	int dataoff;

	uh = skb_header_pointer(skb, protoff, sizeof(_uh), &_uh);
	if (uh == NULL)
		return NULL;
	dataoff = protoff + sizeof(_uh);
	if (dataoff >= skb->len)
		return NULL;
	*datalen = skb->len - dataoff;
	return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
}

/****************************************************************************/
/* Find the TCP expectation this conntrack registered for addr (any source
 * port, any dest port value passed in).  Returns NULL if none or if the
 * match belongs to another master. */
static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
					       union nf_inet_addr *addr,
					       __be16 port)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;

	memset(&tuple.src.u3, 0, sizeof(tuple.src.u3));
	tuple.src.u.tcp.port = 0;
	memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3));
	tuple.dst.u.tcp.port = port;
	tuple.dst.protonum = IPPROTO_TCP;

	exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
	if (exp && exp->master == ct)
		return exp;
	return NULL;
}

/****************************************************************************/
/* Re-arm an expectation's timer; returns 0 if exp is NULL or its timer had
 * already fired (del_timer failed), 1 on success. */
static int set_expect_timeout(struct nf_conntrack_expect *exp,
			      unsigned timeout)
{
	if (!exp || !del_timer(&exp->timeout))
		return 0;

	exp->timeout.expires = jiffies + timeout * HZ;
	add_timer(&exp->timeout);

	return 1;
}

/****************************************************************************/
/* Expect the Q.931 call-signalling connection announced in a RAS RRQ's
 * callSignalAddress list.  Only the first address matching our own source
 * is used. */
static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data,
		       TransportAddress *taddr, int count)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(nat_q931_hook) nat_q931;

	/* Look for the first related address */
	for (i = 0; i < count; i++) {
		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
		    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
			   sizeof(addr)) == 0 && port != 0)
			break;
	}

	if (i >= count)		/* Not found */
		return 0;

	/* Create expect for Q.931 */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  gkrouted_only ? /* only accept calls from GK? */
			  &ct->tuplehash[!dir].tuple.src.u3 : NULL,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_TCP, NULL, &port);
	exp->helper = nf_conntrack_helper_q931;
	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple calls */

	nat_q931 = rcu_dereference(nat_q931_hook);
	if (nat_q931 && ct->status & IPS_NAT_MASK) {	/* Need NAT */
		ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(exp) == 0) {
			pr_debug("nf_ct_ras: expect Q.931 ");
			nf_ct_dump_tuple(&exp->tuple);

			/* Save port for looking up expect in processing RCF */
			info->sig_port[dir] = port;
		} else
			ret = -1;
	}

	nf_ct_expect_put(exp);

	return ret;
}

/****************************************************************************/
/* GRQ: under NAT, rewrite the RAS address to the translated one. */
static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, GatekeeperRequest *grq)
{
	typeof(set_ras_addr_hook) set_ras_addr;

	pr_debug("nf_ct_ras: GRQ\n");

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK)	/* NATed */
		return set_ras_addr(skb, ct, ctinfo, data,
				    &grq->rasAddress, 1);
	return 0;
}

/****************************************************************************/
/* GCF: if the gatekeeper points us at a different RAS address, expect the
 * follow-up UDP RAS connection there. */
static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, GatekeeperConfirm *gcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: GCF\n");

	if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
		return 0;

	/* Registration port is the same as discovery port */
	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == ct->tuplehash[dir].tuple.src.u.udp.port)
		return 0;

	/* Avoid RAS expectation loops. A GCF is never expected. */
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return 0;

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_UDP, NULL, &port);
	exp->helper = nf_conntrack_helper_ras;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect RAS ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	return ret;
}

/****************************************************************************/
/* RRQ: expect the Q.931 connection, optionally rewrite RAS addresses under
 * NAT, and record the registration TTL for connection refresh. */
static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RegistrationRequest *rrq)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int ret;
	typeof(set_ras_addr_hook) set_ras_addr;

	pr_debug("nf_ct_ras: RRQ\n");

	ret = expect_q931(skb, ct, ctinfo, data,
			  rrq->callSignalAddress.item,
			  rrq->callSignalAddress.count);
	if (ret < 0)
		return -1;

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
		ret = set_ras_addr(skb, ct, ctinfo, data,
				   rrq->rasAddress.item,
				   rrq->rasAddress.count);
		if (ret < 0)
			return -1;
	}

	if (rrq->options & eRegistrationRequest_timeToLive) {
		pr_debug("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
		info->timeout = rrq->timeToLive;
	} else
		info->timeout = default_rrq_ttl;

	return 0;
}

/****************************************************************************/
/* RCF: apply the gatekeeper-assigned TTL to the conntrack entry and to the
 * previously created Q.931 expectation. */
static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RegistrationConfirm *rcf)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	struct nf_conntrack_expect *exp;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: RCF\n");

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
				   rcf->callSignalAddress.item,
				   rcf->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	if (rcf->options & eRegistrationConfirm_timeToLive) {
		pr_debug("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
		info->timeout = rcf->timeToLive;
	}

	if (info->timeout > 0) {
		pr_debug("nf_ct_ras: set RAS connection timeout to "
			 "%u seconds\n", info->timeout);
		nf_ct_refresh(ct, skb, info->timeout * HZ);

		/* Set expect timeout */
		spin_lock_bh(&nf_conntrack_lock);
		exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
				  info->sig_port[!dir]);
		if (exp) {
			pr_debug("nf_ct_ras: set Q.931 expect "
				 "timeout to %u seconds for",
				 info->timeout);
			nf_ct_dump_tuple(&exp->tuple);
			set_expect_timeout(exp, info->timeout);
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	return 0;
}

/****************************************************************************/
/* URQ: tear down the registration — drop expectations, forget signalling
 * ports, and keep the conntrack alive briefly for the UCF/URJ answer. */
static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, UnregistrationRequest *urq)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: URQ\n");

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
				   urq->callSignalAddress.item,
				   urq->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	/* Clear old expect */
	nf_ct_remove_expectations(ct);
	info->sig_port[dir] = 0;
	info->sig_port[!dir] = 0;

	/* Give it 30 seconds for UCF or URJ */
	nf_ct_refresh(ct, skb, 30 * HZ);

	return 0;
}

/****************************************************************************/
/* ARQ: under NAT, rewrite the embedded call signal addresses depending on
 * whether we are answering or originating the admission request. */
static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionRequest *arq)
{
	const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_ras: ARQ\n");

	set_h225_addr = rcu_dereference(set_h225_addr_hook);
	if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->destCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    port == info->sig_port[dir] &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Answering ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->destCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     info->sig_port[!dir]);
	}

	if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
	    get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
			  &addr, &port) &&
	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
	    set_h225_addr && ct->status & IPS_NAT_MASK) {
		/* Calling ARQ */
		return set_h225_addr(skb, data, 0,
				     &arq->srcCallSignalAddress,
				     &ct->tuplehash[!dir].tuple.dst.u3,
				     port);
	}

	return 0;
}

/****************************************************************************/
/* ACF: either rewrite the answering address under NAT, or expect the Q.931
 * connection toward the admitted remote endpoint. */
static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, AdmissionConfirm *acf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: ACF\n");

	if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
			   &addr, &port))
		return 0;

	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
		/* Answering ACF */
		set_sig_addr = rcu_dereference(set_sig_addr_hook);
		if (set_sig_addr && ct->status & IPS_NAT_MASK)
			return set_sig_addr(skb, ct, ctinfo, data,
					    &acf->destCallSignalAddress, 1);
		return 0;
	}

	/* Need new expect */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	return ret;
}

/****************************************************************************/
/* LRQ: under NAT, rewrite the reply address. */
static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, LocationRequest *lrq)
{
	typeof(set_ras_addr_hook) set_ras_addr;

	pr_debug("nf_ct_ras: LRQ\n");

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK)
		return set_ras_addr(skb, ct, ctinfo, data,
				    &lrq->replyAddress, 1);
	return 0;
}

/****************************************************************************/
/* LCF: expect the Q.931 connection at the located call signal address. */
static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, LocationConfirm *lcf)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *exp;

	pr_debug("nf_ct_ras: LCF\n");

	if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
			   &addr, &port))
		return 0;

	/* Need new expect for call signal */
	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3, &addr,
			  IPPROTO_TCP, NULL, &port);
	exp->flags = NF_CT_EXPECT_PERMANENT;
	exp->helper = nf_conntrack_helper_q931;

	if (nf_ct_expect_related(exp) == 0) {
		pr_debug("nf_ct_ras: expect Q.931 ");
		nf_ct_dump_tuple(&exp->tuple);
	} else
		ret = -1;

	nf_ct_expect_put(exp);

	/* Ignore rasAddress */

	return ret;
}

/****************************************************************************/
/* IRR: under NAT, rewrite both the RAS and the call signal addresses. */
static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, InfoRequestResponse *irr)
{
	int ret;
	typeof(set_ras_addr_hook) set_ras_addr;
	typeof(set_sig_addr_hook) set_sig_addr;

	pr_debug("nf_ct_ras: IRR\n");

	set_ras_addr = rcu_dereference(set_ras_addr_hook);
	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
		ret = set_ras_addr(skb, ct, ctinfo, data,
				   &irr->rasAddress, 1);
		if (ret < 0)
			return -1;
	}

	set_sig_addr = rcu_dereference(set_sig_addr_hook);
	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
		ret = set_sig_addr(skb, ct, ctinfo, data,
				   irr->callSignalAddress.item,
				   irr->callSignalAddress.count);
		if (ret < 0)
			return -1;
	}

	return 0;
}

/****************************************************************************/
/* Dispatch a decoded RAS message to the matching handler. */
static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       unsigned char **data, RasMessage *ras)
{
	switch (ras->choice) {
	case eRasMessage_gatekeeperRequest:
		return process_grq(skb, ct, ctinfo, data,
				   &ras->gatekeeperRequest);
	case eRasMessage_gatekeeperConfirm:
		return process_gcf(skb, ct, ctinfo, data,
				   &ras->gatekeeperConfirm);
	case eRasMessage_registrationRequest:
		return process_rrq(skb, ct, ctinfo, data,
				   &ras->registrationRequest);
	case eRasMessage_registrationConfirm:
		return process_rcf(skb, ct, ctinfo, data,
				   &ras->registrationConfirm);
	case eRasMessage_unregistrationRequest:
		return process_urq(skb, ct, ctinfo, data,
				   &ras->unregistrationRequest);
	case eRasMessage_admissionRequest:
		return process_arq(skb, ct, ctinfo, data,
				   &ras->admissionRequest);
	case eRasMessage_admissionConfirm:
		return process_acf(skb, ct, ctinfo, data,
				   &ras->admissionConfirm);
	case eRasMessage_locationRequest:
		return process_lrq(skb, ct, ctinfo, data,
				   &ras->locationRequest);
	case eRasMessage_locationConfirm:
		return process_lcf(skb, ct, ctinfo, data,
				   &ras->locationConfirm);
	case eRasMessage_infoRequestResponse:
		return process_irr(skb, ct, ctinfo, data,
				   &ras->infoRequestResponse);
	default:
		pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
		break;
	}

	return 0;
}

/****************************************************************************/
/* Conntrack helper entry for RAS over UDP.  Decoding errors are accepted
 * unchanged; only a processing failure drops the packet. */
static int ras_help(struct sk_buff *skb, unsigned int protoff,
		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	static RasMessage ras;	/* shared decode buffer, protected by nf_h323_lock */
	unsigned char *data;
	int datalen = 0;
	int ret;

	pr_debug("nf_ct_ras: skblen = %u\n", skb->len);

	spin_lock_bh(&nf_h323_lock);

	/* Get UDP data */
	data = get_udp_data(skb, protoff, &datalen);
	if (data == NULL)
		goto accept;
	pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
	nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);

	/* Decode RAS message */
	ret = DecodeRasMessage(data, datalen, &ras);
	if (ret < 0) {
		pr_debug("nf_ct_ras: decoding error: %s\n",
			 ret == H323_ERROR_BOUND ?
			 "out of bound" : "out of range");
		goto accept;
	}

	/* Process RAS message */
	if (process_ras(skb, ct, ctinfo, &data, &ras) < 0)
		goto drop;

accept:
	spin_unlock_bh(&nf_h323_lock);
	return NF_ACCEPT;

drop:
	spin_unlock_bh(&nf_h323_lock);
	if (net_ratelimit())
		pr_info("nf_ct_ras: packet dropped\n");
	return NF_DROP;
}

/****************************************************************************/
static const struct nf_conntrack_expect_policy ras_exp_policy = {
	.max_expected		= 32,
	.timeout		= 240,
};

/* RAS helpers for IPv4 and IPv6. */
static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
	{
		.name			= "RAS",
		.me			= THIS_MODULE,
		.tuple.src.l3num	= AF_INET6,
		.tuple.src.u.udp.port	= cpu_to_be16(RAS_PORT),
		.tuple.dst.protonum	= IPPROTO_UDP,
		.help			= ras_help,
		.expect_policy		= &ras_exp_policy,
	},
};

/****************************************************************************/
/* Module exit: unregister all helpers in reverse registration order, then
 * free the shared decode buffer. */
static void __exit nf_conntrack_h323_fini(void)
{
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
	kfree(h323_buffer);
	pr_debug("nf_ct_h323: fini\n");
}

/****************************************************************************/
/* Module init: allocate the shared decode buffer and register the H.245,
 * Q.931 (v4/v6) and RAS (v4/v6) helpers, unwinding on any failure. */
static int __init nf_conntrack_h323_init(void)
{
	int ret;

	h323_buffer = kmalloc(65536, GFP_KERNEL);
	if (!h323_buffer)
		return -ENOMEM;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245);
	if (ret < 0)
		goto err1;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]);
	if (ret < 0)
		goto err2;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]);
	if (ret < 0)
		goto err3;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]);
	if (ret < 0)
		goto err4;
	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
	if (ret < 0)
		goto err5;
	pr_debug("nf_ct_h323: init success\n");
	return 0;

err5:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
err4:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
err3:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
err2:
	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
err1:
	kfree(h323_buffer);
	return ret;
}

/****************************************************************************/
module_init(nf_conntrack_h323_init);
module_exit(nf_conntrack_h323_fini);

EXPORT_SYMBOL_GPL(get_h225_addr);
EXPORT_SYMBOL_GPL(set_h245_addr_hook);
EXPORT_SYMBOL_GPL(set_h225_addr_hook);
EXPORT_SYMBOL_GPL(set_sig_addr_hook);
EXPORT_SYMBOL_GPL(set_ras_addr_hook);
EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
EXPORT_SYMBOL_GPL(nat_t120_hook);
EXPORT_SYMBOL_GPL(nat_h245_hook);
EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
EXPORT_SYMBOL_GPL(nat_q931_hook);

MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
MODULE_DESCRIPTION("H.323 connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_h323");
MODULE_ALIAS_NFCT_HELPER("h323");
gpl-2.0
lbule/ALPS.L0.MP6.V3.18_LCSH6795_LWT_L_KERNEL
drivers/hid/hid-generic.c
4548
1140
/* * HID support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007-2008 Oliver Neukum * Copyright (c) 2006-2012 Jiri Kosina * Copyright (c) 2012 Henrik Rydberg */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/kernel.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/hid.h> static const struct hid_device_id hid_table[] = { { HID_DEVICE(HID_BUS_ANY, HID_GROUP_GENERIC, HID_ANY_ID, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, hid_table); static struct hid_driver hid_generic = { .name = "hid-generic", .id_table = hid_table, }; module_hid_driver(hid_generic); MODULE_AUTHOR("Henrik Rydberg"); MODULE_DESCRIPTION("HID generic driver"); MODULE_LICENSE("GPL");
gpl-2.0
binkybear/nexus10-5
drivers/isdn/mISDN/core.c
4804
8709
/*
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mISDNif.h>
#include "core.h"

static u_int debug;

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL");
module_param(debug, uint, S_IRUGO | S_IWUSR);

/* Bitmap of device ids in use; one bit per id, up to MAX_DEVICE_ID. */
static u64 device_ids;
#define MAX_DEVICE_ID 63

/* Registered B-channel protocol modules, guarded by bp_lock. */
static LIST_HEAD(Bprotocols);
static DEFINE_RWLOCK(bp_lock);

static void mISDN_dev_release(struct device *dev)
{
	/* nothing to do: the device is part of its parent's data structure */
}

/* sysfs: device id */
static ssize_t _show_id(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return -ENODEV;
	return sprintf(buf, "%d\n", mdev->id);
}

/* sysfs: number of B channels */
static ssize_t _show_nrbchan(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return -ENODEV;
	return sprintf(buf, "%d\n", mdev->nrbchan);
}

/* sysfs: D-channel protocol mask */
static ssize_t _show_d_protocols(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return -ENODEV;
	return sprintf(buf, "%d\n", mdev->Dprotocols);
}

/* sysfs: device B protocols combined with all registered protocol modules */
static ssize_t _show_b_protocols(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return -ENODEV;
	return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
}

/* sysfs: active D-channel protocol */
static ssize_t _show_protocol(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return -ENODEV;
	return sprintf(buf, "%d\n", mdev->D.protocol);
}

/* sysfs: device name */
static ssize_t _show_name(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	strcpy(buf, dev_name(dev));
	return strlen(buf);
}

#if 0 /* hangs */
static ssize_t _set_name(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int err = 0;
	char *out = kmalloc(count + 1, GFP_KERNEL);

	if (!out)
		return -ENOMEM;

	memcpy(out, buf, count);
	if (count && out[count - 1] == '\n')
		out[--count] = 0;
	if (count)
		err = device_rename(dev, out);
	kfree(out);

	return (err < 0) ? err : count;
}
#endif

/* sysfs: one '0'/'1' character per B channel. */
static ssize_t _show_channelmap(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);
	char *bp = buf;
	int i;

	for (i = 0; i <= mdev->nrbchan; i++)
		*bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0';

	return bp - buf;
}

static struct device_attribute mISDN_dev_attrs[] = {
	__ATTR(id, S_IRUGO, _show_id, NULL),
	__ATTR(d_protocols, S_IRUGO, _show_d_protocols, NULL),
	__ATTR(b_protocols, S_IRUGO, _show_b_protocols, NULL),
	__ATTR(protocol, S_IRUGO, _show_protocol, NULL),
	__ATTR(channelmap, S_IRUGO, _show_channelmap, NULL),
	__ATTR(nrbchan, S_IRUGO, _show_nrbchan, NULL),
	__ATTR(name, S_IRUGO, _show_name, NULL),
/*	__ATTR(name, S_IRUGO | S_IWUSR, _show_name, _set_name), */
	{}
};

#ifdef CONFIG_HOTPLUG
/* Pass the B-channel count to udev on device events. */
static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return 0;

	if (add_uevent_var(env, "nchans=%d", mdev->nrbchan))
		return -ENOMEM;

	return 0;
}
#endif

static void mISDN_class_release(struct class *cls)
{
	/* do nothing, it's static */
}

static struct class mISDN_class = {
	.name = "mISDN",
	.owner = THIS_MODULE,
#ifdef CONFIG_HOTPLUG
	.dev_uevent = mISDN_uevent,
#endif
	.dev_attrs = mISDN_dev_attrs,
	.dev_release = mISDN_dev_release,
	.class_release = mISDN_class_release,
};

/* class_find_device() match callback: match on numeric device id. */
static int
_get_mdevice(struct device *dev, void *id)
{
	struct mISDNdevice *mdev = dev_to_mISDN(dev);

	if (!mdev)
		return 0;
	if (mdev->id != *(u_int *)id)
		return 0;
	return 1;
}

/* Look up a registered mISDN device by id; NULL if not found. */
struct mISDNdevice
*get_mdevice(u_int id)
{
	return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id,
					      _get_mdevice));
}

/* class_for_each_device() callback: count devices. */
static int
_get_mdevice_count(struct device *dev, void *cnt)
{
	*(int *)cnt += 1;
	return 0;
}

/* Number of currently registered mISDN devices. */
int
get_mdevice_count(void)
{
	int cnt = 0;

	class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count);
	return cnt;
}

/* Atomically claim the lowest free device id; -EBUSY when exhausted. */
static int
get_free_devid(void)
{
	u_int	i;

	for (i = 0; i <= MAX_DEVICE_ID; i++)
		if (!test_and_set_bit(i, (u_long *)&device_ids))
			break;
	if (i > MAX_DEVICE_ID)
		return -EBUSY;
	return i;
}

/* Register a new mISDN device: assign an id, create its stack, and add it
 * to the mISDN class.  Returns 0 or a negative errno. */
int
mISDN_register_device(struct mISDNdevice *dev,
		      struct device *parent, char *name)
{
	int	err;

	err = get_free_devid();
	if (err < 0)
		goto error1;
	dev->id = err;

	device_initialize(&dev->dev);
	if (name && name[0])
		dev_set_name(&dev->dev, "%s", name);
	else
		dev_set_name(&dev->dev, "mISDN%d", dev->id);
	if (debug & DEBUG_CORE)
		printk(KERN_DEBUG "mISDN_register %s %d\n",
		       dev_name(&dev->dev), dev->id);
	err = create_stack(dev);
	if (err)
		goto error1;

	dev->dev.class = &mISDN_class;
	dev->dev.platform_data = dev;
	dev->dev.parent = parent;
	dev_set_drvdata(&dev->dev, dev);

	err = device_add(&dev->dev);
	if (err)
		goto error3;
	return 0;

error3:
	delete_stack(dev);
	return err;
error1:
	return err;
}
EXPORT_SYMBOL(mISDN_register_device);

/* Unregister a device: remove it from sysfs, release its id, tear down its
 * stack and drop the final device reference. */
void
mISDN_unregister_device(struct mISDNdevice *dev) {
	if (debug & DEBUG_CORE)
		printk(KERN_DEBUG "mISDN_unregister %s %d\n",
		       dev_name(&dev->dev), dev->id);
	/* sysfs_remove_link(&dev->dev.kobj, "device"); */
	device_del(&dev->dev);
	dev_set_drvdata(&dev->dev, NULL);

	test_and_clear_bit(dev->id, (u_long *)&device_ids);
	delete_stack(dev);
	put_device(&dev->dev);
}
EXPORT_SYMBOL(mISDN_unregister_device);

/* OR together the protocol masks of all registered B-protocol modules. */
u_int
get_all_Bprotocols(void)
{
	struct Bprotocol	*bp;
	u_int	m = 0;

	read_lock(&bp_lock);
	list_for_each_entry(bp, &Bprotocols, list)
		m |= bp->Bprotocols;
	read_unlock(&bp_lock);
	return m;
}

/* Find the B-protocol module serving any protocol in mask m. */
struct Bprotocol *
get_Bprotocol4mask(u_int m)
{
	struct Bprotocol	*bp;

	read_lock(&bp_lock);
	list_for_each_entry(bp, &Bprotocols, list)
		if (bp->Bprotocols & m) {
			read_unlock(&bp_lock);
			return bp;
		}
	read_unlock(&bp_lock);
	return NULL;
}

/* Find the B-protocol module for a single protocol id. */
struct Bprotocol *
get_Bprotocol4id(u_int id)
{
	u_int	m;

	if (id < ISDN_P_B_START || id > 63) {
		printk(KERN_WARNING "%s id not in range  %d\n",
		       __func__, id);
		return NULL;
	}
	m = 1 << (id & ISDN_P_B_MASK);
	return get_Bprotocol4mask(m);
}

/* Register a B-protocol module; refuses masks that overlap an existing
 * registration. */
int
mISDN_register_Bprotocol(struct Bprotocol *bp)
{
	u_long			flags;
	struct Bprotocol	*old;

	if (debug & DEBUG_CORE)
		printk(KERN_DEBUG "%s: %s/%x\n", __func__,
		       bp->name, bp->Bprotocols);
	old = get_Bprotocol4mask(bp->Bprotocols);
	if (old) {
		printk(KERN_WARNING
		       "register duplicate protocol old %s/%x new %s/%x\n",
		       old->name, old->Bprotocols, bp->name, bp->Bprotocols);
		return -EBUSY;
	}
	write_lock_irqsave(&bp_lock, flags);
	list_add_tail(&bp->list, &Bprotocols);
	write_unlock_irqrestore(&bp_lock, flags);
	return 0;
}
EXPORT_SYMBOL(mISDN_register_Bprotocol);

/* Remove a B-protocol module from the registry. */
void
mISDN_unregister_Bprotocol(struct Bprotocol *bp)
{
	u_long	flags;

	if (debug & DEBUG_CORE)
		printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name,
		       bp->Bprotocols);
	write_lock_irqsave(&bp_lock, flags);
	list_del(&bp->list);
	write_unlock_irqrestore(&bp_lock, flags);
}
EXPORT_SYMBOL(mISDN_unregister_Bprotocol);

/* Bring up all mISDN subsystems in order, unwinding on failure. */
static int
mISDNInit(void)
{
	int	err;

	printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
	       MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
	mISDN_init_clock(&debug);
	mISDN_initstack(&debug);
	err = class_register(&mISDN_class);
	if (err)
		goto error1;
	err = mISDN_inittimer(&debug);
	if (err)
		goto error2;
	err = l1_init(&debug);
	if (err)
		goto error3;
	err = Isdnl2_Init(&debug);
	if (err)
		goto error4;
	err = misdn_sock_init(&debug);
	if (err)
		goto error5;
	return 0;

error5:
	Isdnl2_cleanup();
error4:
	l1_cleanup();
error3:
	mISDN_timer_cleanup();
error2:
	class_unregister(&mISDN_class);
error1:
	return err;
}

/* Tear down subsystems in reverse of mISDNInit(). */
static void mISDN_cleanup(void)
{
	misdn_sock_cleanup();
	Isdnl2_cleanup();
	l1_cleanup();
	mISDN_timer_cleanup();
	class_unregister(&mISDN_class);

	printk(KERN_DEBUG "mISDNcore unloaded\n");
}

module_init(mISDNInit);
module_exit(mISDN_cleanup);
gpl-2.0
kelvinbui31/android_mediatek_muse72
drivers/watchdog/pcwd_pci.c
4804
20765
/* * Berkshire PCI-PC Watchdog Card Driver * * (c) Copyright 2003-2007 Wim Van Sebroeck <wim@iguana.be>. * * Based on source code of the following authors: * Ken Hollis <kenji@bitgate.com>, * Lindsay Harris <lindsay@bluegum.com>, * Alan Cox <alan@lxorguk.ukuu.org.uk>, * Matt Domsch <Matt_Domsch@dell.com>, * Rob Radez <rob@osinvestor.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. */ /* * A bells and whistles driver is available from: * http://www.kernel.org/pub/linux/kernel/people/wim/pcwd/pcwd_pci/ * * More info available at * http://www.berkprod.com/ or http://www.pcwatchdog.com/ */ /* * Includes, defines, variables, module parameters, ... */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ #include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ #include <linux/pci.h> /* For pci functions */ #include <linux/ioport.h> /* For io-port access */ #include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... 
*/ #include <linux/io.h> /* For inb/outb/... */ /* Module and version information */ #define WATCHDOG_VERSION "1.03" #define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog" #define WATCHDOG_NAME "pcwd_pci" #define DRIVER_VERSION WATCHDOG_DRIVER_NAME " driver, v" WATCHDOG_VERSION /* Stuff for the PCI ID's */ #ifndef PCI_VENDOR_ID_QUICKLOGIC #define PCI_VENDOR_ID_QUICKLOGIC 0x11e3 #endif #ifndef PCI_DEVICE_ID_WATCHDOG_PCIPCWD #define PCI_DEVICE_ID_WATCHDOG_PCIPCWD 0x5030 #endif /* * These are the defines that describe the control status bits for the * PCI-PC Watchdog card. */ /* Port 1 : Control Status #1 */ #define WD_PCI_WTRP 0x01 /* Watchdog Trip status */ #define WD_PCI_HRBT 0x02 /* Watchdog Heartbeat */ #define WD_PCI_TTRP 0x04 /* Temperature Trip status */ #define WD_PCI_RL2A 0x08 /* Relay 2 Active */ #define WD_PCI_RL1A 0x10 /* Relay 1 Active */ #define WD_PCI_R2DS 0x40 /* Relay 2 Disable Temperature-trip / reset */ #define WD_PCI_RLY2 0x80 /* Activate Relay 2 on the board */ /* Port 2 : Control Status #2 */ #define WD_PCI_WDIS 0x10 /* Watchdog Disable */ #define WD_PCI_ENTP 0x20 /* Enable Temperature Trip Reset */ #define WD_PCI_WRSP 0x40 /* Watchdog wrote response */ #define WD_PCI_PCMD 0x80 /* PC has sent command */ /* according to documentation max. 
time to process a command for the pci * watchdog card is 100 ms, so we give it 150 ms to do it's job */ #define PCI_COMMAND_TIMEOUT 150 /* Watchdog's internal commands */ #define CMD_GET_STATUS 0x04 #define CMD_GET_FIRMWARE_VERSION 0x08 #define CMD_READ_WATCHDOG_TIMEOUT 0x18 #define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 #define CMD_GET_CLEAR_RESET_COUNT 0x84 /* Watchdog's Dip Switch heartbeat values */ static const int heartbeat_tbl[] = { 5, /* OFF-OFF-OFF = 5 Sec */ 10, /* OFF-OFF-ON = 10 Sec */ 30, /* OFF-ON-OFF = 30 Sec */ 60, /* OFF-ON-ON = 1 Min */ 300, /* ON-OFF-OFF = 5 Min */ 600, /* ON-OFF-ON = 10 Min */ 1800, /* ON-ON-OFF = 30 Min */ 3600, /* ON-ON-ON = 1 hour */ }; /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; /* internal variables */ static int temp_panic; static unsigned long is_active; static char expect_release; /* this is private data for each PCI-PC watchdog card */ static struct { /* Wether or not the card has a temperature device */ int supports_temp; /* The card's boot status */ int boot_status; /* The cards I/O address */ unsigned long io_addr; /* the lock for io operations */ spinlock_t io_lock; /* the PCI-device */ struct pci_dev *pdev; } pcipcwd_private; /* module parameters */ #define QUIET 0 /* Default */ #define VERBOSE 1 /* Verbose */ #define DEBUG 2 /* print fancy stuff too */ static int debug = QUIET; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)"); #define WATCHDOG_HEARTBEAT 0 /* default heartbeat = delay-time from dip-switches */ static int heartbeat = WATCHDOG_HEARTBEAT; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. 
" "(0<heartbeat<65536 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Internal functions */ static int send_command(int cmd, int *msb, int *lsb) { int got_response, count; if (debug >= DEBUG) pr_debug("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x\n", cmd, *msb, *lsb); spin_lock(&pcipcwd_private.io_lock); /* If a command requires data it should be written first. * Data for commands with 8 bits of data should be written to port 4. * Commands with 16 bits of data, should be written as LSB to port 4 * and MSB to port 5. * After the required data has been written then write the command to * port 6. */ outb_p(*lsb, pcipcwd_private.io_addr + 4); outb_p(*msb, pcipcwd_private.io_addr + 5); outb_p(cmd, pcipcwd_private.io_addr + 6); /* wait till the pci card processed the command, signaled by * the WRSP bit in port 2 and give it a max. 
timeout of * PCI_COMMAND_TIMEOUT to process */ got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; for (count = 0; (count < PCI_COMMAND_TIMEOUT) && (!got_response); count++) { mdelay(1); got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; } if (debug >= DEBUG) { if (got_response) { pr_debug("time to process command was: %d ms\n", count); } else { pr_debug("card did not respond on command!\n"); } } if (got_response) { /* read back response */ *lsb = inb_p(pcipcwd_private.io_addr + 4); *msb = inb_p(pcipcwd_private.io_addr + 5); /* clear WRSP bit */ inb_p(pcipcwd_private.io_addr + 6); if (debug >= DEBUG) pr_debug("received following data for cmd=0x%02x: msb=0x%02x lsb=0x%02x\n", cmd, *msb, *lsb); } spin_unlock(&pcipcwd_private.io_lock); return got_response; } static inline void pcipcwd_check_temperature_support(void) { if (inb_p(pcipcwd_private.io_addr) != 0xF0) pcipcwd_private.supports_temp = 1; } static int pcipcwd_get_option_switches(void) { int option_switches; option_switches = inb_p(pcipcwd_private.io_addr + 3); return option_switches; } static void pcipcwd_show_card_info(void) { int got_fw_rev, fw_rev_major, fw_rev_minor; char fw_ver_str[20]; /* The cards firmware version */ int option_switches; got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); if (got_fw_rev) sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); else sprintf(fw_ver_str, "<card no answer>"); /* Get switch settings */ option_switches = pcipcwd_get_option_switches(); pr_info("Found card at port 0x%04x (Firmware: %s) %s temp option\n", (int) pcipcwd_private.io_addr, fw_ver_str, (pcipcwd_private.supports_temp ? "with" : "without")); pr_info("Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n", option_switches, ((option_switches & 0x10) ? "ON" : "OFF"), ((option_switches & 0x08) ? 
"ON" : "OFF")); if (pcipcwd_private.boot_status & WDIOF_CARDRESET) pr_info("Previous reset was caused by the Watchdog card\n"); if (pcipcwd_private.boot_status & WDIOF_OVERHEAT) pr_info("Card sensed a CPU Overheat\n"); if (pcipcwd_private.boot_status == 0) pr_info("No previous trip detected - Cold boot or reset\n"); } static int pcipcwd_start(void) { int stat_reg; spin_lock(&pcipcwd_private.io_lock); outb_p(0x00, pcipcwd_private.io_addr + 3); udelay(1000); stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); if (stat_reg & WD_PCI_WDIS) { pr_err("Card timer not enabled\n"); return -1; } if (debug >= VERBOSE) pr_debug("Watchdog started\n"); return 0; } static int pcipcwd_stop(void) { int stat_reg; spin_lock(&pcipcwd_private.io_lock); outb_p(0xA5, pcipcwd_private.io_addr + 3); udelay(1000); outb_p(0xA5, pcipcwd_private.io_addr + 3); udelay(1000); stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); if (!(stat_reg & WD_PCI_WDIS)) { pr_err("Card did not acknowledge disable attempt\n"); return -1; } if (debug >= VERBOSE) pr_debug("Watchdog stopped\n"); return 0; } static int pcipcwd_keepalive(void) { /* Re-trigger watchdog by writing to port 0 */ spin_lock(&pcipcwd_private.io_lock); outb_p(0x42, pcipcwd_private.io_addr); /* send out any data */ spin_unlock(&pcipcwd_private.io_lock); if (debug >= DEBUG) pr_debug("Watchdog keepalive signal send\n"); return 0; } static int pcipcwd_set_heartbeat(int t) { int t_msb = t / 256; int t_lsb = t % 256; if ((t < 0x0001) || (t > 0xFFFF)) return -EINVAL; /* Write new heartbeat to watchdog */ send_command(CMD_WRITE_WATCHDOG_TIMEOUT, &t_msb, &t_lsb); heartbeat = t; if (debug >= VERBOSE) pr_debug("New heartbeat: %d\n", heartbeat); return 0; } static int pcipcwd_get_status(int *status) { int control_status; *status = 0; control_status = inb_p(pcipcwd_private.io_addr + 1); if (control_status & WD_PCI_WTRP) *status |= WDIOF_CARDRESET; if (control_status & WD_PCI_TTRP) { 
*status |= WDIOF_OVERHEAT; if (temp_panic) panic(KBUILD_MODNAME ": Temperature overheat trip!\n"); } if (debug >= DEBUG) pr_debug("Control Status #1: 0x%02x\n", control_status); return 0; } static int pcipcwd_clear_status(void) { int control_status; int msb; int reset_counter; if (debug >= VERBOSE) pr_info("clearing watchdog trip status & LED\n"); control_status = inb_p(pcipcwd_private.io_addr + 1); if (debug >= DEBUG) { pr_debug("status was: 0x%02x\n", control_status); pr_debug("sending: 0x%02x\n", (control_status & WD_PCI_R2DS) | WD_PCI_WTRP); } /* clear trip status & LED and keep mode of relay 2 */ outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP, pcipcwd_private.io_addr + 1); /* clear reset counter */ msb = 0; reset_counter = 0xff; send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter); if (debug >= DEBUG) { pr_debug("reset count was: 0x%02x\n", reset_counter); } return 0; } static int pcipcwd_get_temperature(int *temperature) { *temperature = 0; if (!pcipcwd_private.supports_temp) return -ENODEV; spin_lock(&pcipcwd_private.io_lock); *temperature = inb_p(pcipcwd_private.io_addr); spin_unlock(&pcipcwd_private.io_lock); /* * Convert celsius to fahrenheit, since this was * the decided 'standard' for this return value. 
*/ *temperature = (*temperature * 9 / 5) + 32; if (debug >= DEBUG) { pr_debug("temperature is: %d F\n", *temperature); } return 0; } static int pcipcwd_get_timeleft(int *time_left) { int msb; int lsb; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ send_command(CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb); *time_left = (msb << 8) + lsb; if (debug >= VERBOSE) pr_debug("Time left before next reboot: %d\n", *time_left); return 0; } /* * /dev/watchdog handling */ static ssize_t pcipcwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... */ expect_release = 0; /* scan to see whether or not we got the * magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_release = 42; } } /* someone wrote to us, we should reload the timer */ pcipcwd_keepalive(); } return len; } static long pcipcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = WATCHDOG_DRIVER_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: { int status; pcipcwd_get_status(&status); return put_user(status, p); } case WDIOC_GETBOOTSTATUS: return put_user(pcipcwd_private.boot_status, p); case WDIOC_GETTEMP: { int temperature; if (pcipcwd_get_temperature(&temperature)) return -EFAULT; return put_user(temperature, p); } case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { if (pcipcwd_stop()) return -EIO; retval = 0; } if (new_options & WDIOS_ENABLECARD) { if (pcipcwd_start()) return -EIO; retval = 0; } if (new_options & WDIOS_TEMPPANIC) { temp_panic = 1; retval = 0; } return retval; } case WDIOC_KEEPALIVE: pcipcwd_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_heartbeat; if (get_user(new_heartbeat, p)) return -EFAULT; if (pcipcwd_set_heartbeat(new_heartbeat)) return -EINVAL; pcipcwd_keepalive(); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); case WDIOC_GETTIMELEFT: { int time_left; if (pcipcwd_get_timeleft(&time_left)) return -EFAULT; return put_user(time_left, p); } default: return -ENOTTY; } } static int pcipcwd_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &is_active)) { if (debug >= VERBOSE) pr_err("Attempt to open already opened device\n"); return -EBUSY; } /* Activate */ pcipcwd_start(); pcipcwd_keepalive(); return nonseekable_open(inode, file); } static int pcipcwd_release(struct inode *inode, struct file *file) { /* * Shut off the timer. 
*/ if (expect_release == 42) { pcipcwd_stop(); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); pcipcwd_keepalive(); } expect_release = 0; clear_bit(0, &is_active); return 0; } /* * /dev/temperature handling */ static ssize_t pcipcwd_temp_read(struct file *file, char __user *data, size_t len, loff_t *ppos) { int temperature; if (pcipcwd_get_temperature(&temperature)) return -EFAULT; if (copy_to_user(data, &temperature, 1)) return -EFAULT; return 1; } static int pcipcwd_temp_open(struct inode *inode, struct file *file) { if (!pcipcwd_private.supports_temp) return -ENODEV; return nonseekable_open(inode, file); } static int pcipcwd_temp_release(struct inode *inode, struct file *file) { return 0; } /* * Notify system */ static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) pcipcwd_stop(); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations pcipcwd_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pcipcwd_write, .unlocked_ioctl = pcipcwd_ioctl, .open = pcipcwd_open, .release = pcipcwd_release, }; static struct miscdevice pcipcwd_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pcipcwd_fops, }; static const struct file_operations pcipcwd_temp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pcipcwd_temp_read, .open = pcipcwd_temp_open, .release = pcipcwd_temp_release, }; static struct miscdevice pcipcwd_temp_miscdev = { .minor = TEMP_MINOR, .name = "temperature", .fops = &pcipcwd_temp_fops, }; static struct notifier_block pcipcwd_notifier = { .notifier_call = pcipcwd_notify_sys, }; /* * Init & exit routines */ static int __devinit pcipcwd_card_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = -EIO; cards_found++; if (cards_found == 1) pr_info("%s\n", DRIVER_VERSION); if (cards_found > 1) { pr_err("This driver only supports 1 device\n"); return -ENODEV; } if 
(pci_enable_device(pdev)) { pr_err("Not possible to enable PCI Device\n"); return -ENODEV; } if (pci_resource_start(pdev, 0) == 0x0000) { pr_err("No I/O-Address for card detected\n"); ret = -ENODEV; goto err_out_disable_device; } pcipcwd_private.pdev = pdev; pcipcwd_private.io_addr = pci_resource_start(pdev, 0); if (pci_request_regions(pdev, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", (int) pcipcwd_private.io_addr); ret = -EIO; goto err_out_disable_device; } /* get the boot_status */ pcipcwd_get_status(&pcipcwd_private.boot_status); /* clear the "card caused reboot" flag */ pcipcwd_clear_status(); /* disable card */ pcipcwd_stop(); /* Check whether or not the card supports the temperature device */ pcipcwd_check_temperature_support(); /* Show info about the card itself */ pcipcwd_show_card_info(); /* If heartbeat = 0 then we use the heartbeat from the dip-switches */ if (heartbeat == 0) heartbeat = heartbeat_tbl[(pcipcwd_get_option_switches() & 0x07)]; /* Check that the heartbeat value is within it's range ; * if not reset to the default */ if (pcipcwd_set_heartbeat(heartbeat)) { pcipcwd_set_heartbeat(WATCHDOG_HEARTBEAT); pr_info("heartbeat value must be 0<heartbeat<65536, using %d\n", WATCHDOG_HEARTBEAT); } ret = register_reboot_notifier(&pcipcwd_notifier); if (ret != 0) { pr_err("cannot register reboot notifier (err=%d)\n", ret); goto err_out_release_region; } if (pcipcwd_private.supports_temp) { ret = misc_register(&pcipcwd_temp_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", TEMP_MINOR, ret); goto err_out_unregister_reboot; } } ret = misc_register(&pcipcwd_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto err_out_misc_deregister; } pr_info("initialized. 
heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; err_out_misc_deregister: if (pcipcwd_private.supports_temp) misc_deregister(&pcipcwd_temp_miscdev); err_out_unregister_reboot: unregister_reboot_notifier(&pcipcwd_notifier); err_out_release_region: pci_release_regions(pdev); err_out_disable_device: pci_disable_device(pdev); return ret; } static void __devexit pcipcwd_card_exit(struct pci_dev *pdev) { /* Stop the timer before we leave */ if (!nowayout) pcipcwd_stop(); /* Deregister */ misc_deregister(&pcipcwd_miscdev); if (pcipcwd_private.supports_temp) misc_deregister(&pcipcwd_temp_miscdev); unregister_reboot_notifier(&pcipcwd_notifier); pci_release_regions(pdev); pci_disable_device(pdev); cards_found--; } static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = { { PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD, PCI_ANY_ID, PCI_ANY_ID, }, { 0 }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl); static struct pci_driver pcipcwd_driver = { .name = WATCHDOG_NAME, .id_table = pcipcwd_pci_tbl, .probe = pcipcwd_card_init, .remove = __devexit_p(pcipcwd_card_exit), }; static int __init pcipcwd_init_module(void) { spin_lock_init(&pcipcwd_private.io_lock); return pci_register_driver(&pcipcwd_driver); } static void __exit pcipcwd_cleanup_module(void) { pci_unregister_driver(&pcipcwd_driver); pr_info("Watchdog Module Unloaded\n"); } module_init(pcipcwd_init_module); module_exit(pcipcwd_cleanup_module); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>"); MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS_MISCDEV(TEMP_MINOR);
gpl-2.0
shutt1e/lge_kernel_3.x
drivers/media/dvb/dvb-usb/dtv5100.c
4804
5845
/* * DVB USB Linux driver for AME DTV-5100 USB2.0 DVB-T * * Copyright (C) 2008 Antoine Jacquet <royale@zerezo.com> * http://royale.zerezo.com/dtv5100/ * * Inspired by gl861.c and au6610.c drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "dtv5100.h" #include "zl10353.h" #include "qt1010.h" /* debug */ static int dvb_usb_dtv5100_debug; module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 request; u8 type; u16 value; u16 index; switch (wlen) { case 1: /* write { reg }, read { value } */ request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ : DTV5100_TUNER_READ); type = USB_TYPE_VENDOR | USB_DIR_IN; value = 0; break; case 2: /* write { reg, value } */ request = (addr == DTV5100_DEMOD_ADDR ? 
DTV5100_DEMOD_WRITE : DTV5100_TUNER_WRITE); type = USB_TYPE_VENDOR | USB_DIR_OUT; value = wbuf[1]; break; default: warn("wlen = %x, aborting.", wlen); return -EINVAL; } index = (addr << 8) + wbuf[0]; msleep(1); /* avoid I2C errors */ return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, type, value, index, rbuf, rlen, DTV5100_USB_TIMEOUT); } /* I2C */ static int dtv5100_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (num > 2) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (dtv5100_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len) < 0) break; i++; } else if (dtv5100_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0) < 0) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 dtv5100_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dtv5100_i2c_algo = { .master_xfer = dtv5100_i2c_xfer, .functionality = dtv5100_i2c_func, }; /* Callbacks for DVB USB */ static struct zl10353_config dtv5100_zl10353_config = { .demod_address = DTV5100_DEMOD_ADDR, .no_tuner = 1, .parallel_ts = 1, }; static int dtv5100_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe = dvb_attach(zl10353_attach, &dtv5100_zl10353_config, &adap->dev->i2c_adap); if (adap->fe == NULL) return -EIO; /* disable i2c gate, or it won't work... is this safe? */ adap->fe->ops.i2c_gate_ctrl = NULL; return 0; } static struct qt1010_config dtv5100_qt1010_config = { .i2c_address = DTV5100_TUNER_ADDR }; static int dtv5100_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap, &dtv5100_qt1010_config) == NULL ? 
-ENODEV : 0; } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties dtv5100_properties; static int dtv5100_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i, ret; struct usb_device *udev = interface_to_usbdev(intf); /* initialize non qt1010/zl10353 part? */ for (i = 0; dtv5100_init[i].request; i++) { ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), dtv5100_init[i].request, USB_TYPE_VENDOR | USB_DIR_OUT, dtv5100_init[i].value, dtv5100_init[i].index, NULL, 0, DTV5100_USB_TIMEOUT); if (ret) return ret; } ret = dvb_usb_device_init(intf, &dtv5100_properties, THIS_MODULE, NULL, adapter_nr); if (ret) return ret; return 0; } static struct usb_device_id dtv5100_table[] = { { USB_DEVICE(0x06be, 0xa232) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, dtv5100_table); static struct dvb_usb_device_properties dtv5100_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = 0, .num_adapters = 1, .adapter = {{ .frontend_attach = dtv5100_frontend_attach, .tuner_attach = dtv5100_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, } }, .i2c_algo = &dtv5100_i2c_algo, .num_device_descs = 1, .devices = { { .name = "AME DTV-5100 USB2.0 DVB-T", .cold_ids = { NULL }, .warm_ids = { &dtv5100_table[0], NULL }, }, } }; static struct usb_driver dtv5100_driver = { .name = "dvb_usb_dtv5100", .probe = dtv5100_probe, .disconnect = dvb_usb_device_exit, .id_table = dtv5100_table, }; /* module stuff */ static int __init dtv5100_module_init(void) { int ret; ret = usb_register(&dtv5100_driver); if (ret) err("usb_register failed. Error number %d", ret); return ret; } static void __exit dtv5100_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&dtv5100_driver); } module_init(dtv5100_module_init); module_exit(dtv5100_module_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
CaptainThrowback/kernel_htc_e8
drivers/char/hw_random/n2-drv.c
5060
18584
/* n2-drv.c: Niagara-2 RNG driver. * * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/preempt.h> #include <linux/hw_random.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/hypervisor.h> #include "n2rng.h" #define DRV_MODULE_NAME "n2rng" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.2" #define DRV_MODULE_RELDATE "July 27, 2011" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Niagara2 RNG driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); /* The Niagara2 RNG provides a 64-bit read-only random number * register, plus a control register. Access to the RNG is * virtualized through the hypervisor so that both guests and control * nodes can access the device. * * The entropy source consists of raw entropy sources, each * constructed from a voltage controlled oscillator whose phase is * jittered by thermal noise sources. * * The oscillator in each of the three raw entropy sources run at * different frequencies. Normally, all three generator outputs are * gathered, xored together, and fed into a CRC circuit, the output of * which is the 64-bit read-only register. * * Some time is necessary for all the necessary entropy to build up * such that a full 64-bits of entropy are available in the register. * In normal operating mode (RNG_CTL_LFSR is set), the chip implements * an interlock which blocks register reads until sufficient entropy * is available. * * A control register is provided for adjusting various aspects of RNG * operation, and to enable diagnostic modes. Each of the three raw * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). 
Also * provided are fields for controlling the minimum time in cycles * between read accesses to the register (RNG_CTL_WAIT, this controls * the interlock described in the previous paragraph). * * The standard setting is to have the mode bit (RNG_CTL_LFSR) set, * all three entropy sources enabled, and the interlock time set * appropriately. * * The CRC polynomial used by the chip is: * * P(X) = x64 + x61 + x57 + x56 + x52 + x51 + x50 + x48 + x47 + x46 + * x43 + x42 + x41 + x39 + x38 + x37 + x35 + x32 + x28 + x25 + * x22 + x21 + x17 + x15 + x13 + x12 + x11 + x7 + x5 + x + 1 * * The RNG_CTL_VCO value of each noise cell must be programmed * separately. This is why 4 control register values must be provided * to the hypervisor. During a write, the hypervisor writes them all, * one at a time, to the actual RNG_CTL register. The first three * values are used to setup the desired RNG_CTL_VCO for each entropy * source, for example: * * control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1 * control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2 * control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3 * * And then the fourth value sets the final chip state and enables * desired. 
*/ static int n2rng_hv_err_trans(unsigned long hv_err) { switch (hv_err) { case HV_EOK: return 0; case HV_EWOULDBLOCK: return -EAGAIN; case HV_ENOACCESS: return -EPERM; case HV_EIO: return -EIO; case HV_EBUSY: return -EBUSY; case HV_EBADALIGN: case HV_ENORADDR: return -EFAULT; default: return -EINVAL; } } static unsigned long n2rng_generic_read_control_v2(unsigned long ra, unsigned long unit) { unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status; int block = 0, busy = 0; while (1) { hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state, &ticks, &watchdog_delta, &watchdog_status); if (hv_err == HV_EOK) break; if (hv_err == HV_EBUSY) { if (++busy >= N2RNG_BUSY_LIMIT) break; udelay(1); } else if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) break; __delay(ticks); } else break; } return hv_err; } /* In multi-socket situations, the hypervisor might need to * queue up the RNG control register write if it's for a unit * that is on a cpu socket other than the one we are executing on. * * We poll here waiting for a successful read of that control * register to make sure the write has been actually performed. 
*/ static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit) { unsigned long ra = __pa(&np->scratch_control[0]); return n2rng_generic_read_control_v2(ra, unit); } static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit, unsigned long state, unsigned long control_ra, unsigned long watchdog_timeout, unsigned long *ticks) { unsigned long hv_err; if (np->hvapi_major == 1) { hv_err = sun4v_rng_ctl_write_v1(control_ra, state, watchdog_timeout, ticks); } else { hv_err = sun4v_rng_ctl_write_v2(control_ra, state, watchdog_timeout, unit); if (hv_err == HV_EOK) hv_err = n2rng_control_settle_v2(np, unit); *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; } return hv_err; } static int n2rng_generic_read_data(unsigned long data_ra) { unsigned long ticks, hv_err; int block = 0, hcheck = 0; while (1) { hv_err = sun4v_rng_data_read(data_ra, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { if (++hcheck >= N2RNG_HCHECK_LIMIT) return -EIO; udelay(10000); } else return -ENODEV; } } static unsigned long n2rng_read_diag_data_one(struct n2rng *np, unsigned long unit, unsigned long data_ra, unsigned long data_len, unsigned long *ticks) { unsigned long hv_err; if (np->hvapi_major == 1) { hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks); } else { hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len, unit, ticks); if (!*ticks) *ticks = N2RNG_ACCUM_CYCLES_DEFAULT; } return hv_err; } static int n2rng_generic_read_diag_data(struct n2rng *np, unsigned long unit, unsigned long data_ra, unsigned long data_len) { unsigned long ticks, hv_err; int block = 0; while (1) { hv_err = n2rng_read_diag_data_one(np, unit, data_ra, data_len, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == 
HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { return -EIO; } else return -ENODEV; } } static int n2rng_generic_write_control(struct n2rng *np, unsigned long control_ra, unsigned long unit, unsigned long state) { unsigned long hv_err, ticks; int block = 0, busy = 0; while (1) { hv_err = n2rng_write_ctl_one(np, unit, state, control_ra, np->wd_timeo, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_EBUSY) { if (++busy >= N2RNG_BUSY_LIMIT) return -EBUSY; udelay(1); } else return -ENODEV; } } /* Just try to see if we can successfully access the control register * of the RNG on the domain on which we are currently executing. */ static int n2rng_try_read_ctl(struct n2rng *np) { unsigned long hv_err; unsigned long x; if (np->hvapi_major == 1) { hv_err = sun4v_rng_get_diag_ctl(); } else { /* We purposefully give invalid arguments, HV_NOACCESS * is higher priority than the errors we'd get from * these other cases, and that's the error we are * truly interested in. 
*/ hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x); switch (hv_err) { case HV_EWOULDBLOCK: case HV_ENOACCESS: break; default: hv_err = HV_EOK; break; } } return n2rng_hv_err_trans(hv_err); } #define CONTROL_DEFAULT_BASE \ ((2 << RNG_CTL_ASEL_SHIFT) | \ (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_CTL_WAIT_SHIFT) | \ RNG_CTL_LFSR) #define CONTROL_DEFAULT_0 \ (CONTROL_DEFAULT_BASE | \ (1 << RNG_CTL_VCO_SHIFT) | \ RNG_CTL_ES1) #define CONTROL_DEFAULT_1 \ (CONTROL_DEFAULT_BASE | \ (2 << RNG_CTL_VCO_SHIFT) | \ RNG_CTL_ES2) #define CONTROL_DEFAULT_2 \ (CONTROL_DEFAULT_BASE | \ (3 << RNG_CTL_VCO_SHIFT) | \ RNG_CTL_ES3) #define CONTROL_DEFAULT_3 \ (CONTROL_DEFAULT_BASE | \ RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3) static void n2rng_control_swstate_init(struct n2rng *np) { int i; np->flags |= N2RNG_FLAG_CONTROL; np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT; np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT; np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT; for (i = 0; i < np->num_units; i++) { struct n2rng_unit *up = &np->units[i]; up->control[0] = CONTROL_DEFAULT_0; up->control[1] = CONTROL_DEFAULT_1; up->control[2] = CONTROL_DEFAULT_2; up->control[3] = CONTROL_DEFAULT_3; } np->hv_state = HV_RNG_STATE_UNCONFIGURED; } static int n2rng_grab_diag_control(struct n2rng *np) { int i, busy_count, err = -ENODEV; busy_count = 0; for (i = 0; i < 100; i++) { err = n2rng_try_read_ctl(np); if (err != -EAGAIN) break; if (++busy_count > 100) { dev_err(&np->op->dev, "Grab diag control timeout.\n"); return -ENODEV; } udelay(1); } return err; } static int n2rng_init_control(struct n2rng *np) { int err = n2rng_grab_diag_control(np); /* Not in the control domain, that's OK we are only a consumer * of the RNG data, we don't setup and program it. 
*/ if (err == -EPERM) return 0; if (err) return err; n2rng_control_swstate_init(np); return 0; } static int n2rng_data_read(struct hwrng *rng, u32 *data) { struct n2rng *np = (struct n2rng *) rng->priv; unsigned long ra = __pa(&np->test_data); int len; if (!(np->flags & N2RNG_FLAG_READY)) { len = 0; } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) { np->flags &= ~N2RNG_FLAG_BUFFER_VALID; *data = np->buffer; len = 4; } else { int err = n2rng_generic_read_data(ra); if (!err) { np->buffer = np->test_data >> 32; *data = np->test_data & 0xffffffff; len = 4; } else { dev_err(&np->op->dev, "RNG error, restesting\n"); np->flags &= ~N2RNG_FLAG_READY; if (!(np->flags & N2RNG_FLAG_SHUTDOWN)) schedule_delayed_work(&np->work, 0); len = 0; } } return len; } /* On a guest node, just make sure we can read random data properly. * If a control node reboots or reloads it's n2rng driver, this won't * work during that time. So we have to keep probing until the device * becomes usable. */ static int n2rng_guest_check(struct n2rng *np) { unsigned long ra = __pa(&np->test_data); return n2rng_generic_read_data(ra); } static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit, u64 *pre_control, u64 pre_state, u64 *buffer, unsigned long buf_len, u64 *post_control, u64 post_state) { unsigned long post_ctl_ra = __pa(post_control); unsigned long pre_ctl_ra = __pa(pre_control); unsigned long buffer_ra = __pa(buffer); int err; err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state); if (err) return err; err = n2rng_generic_read_diag_data(np, unit, buffer_ra, buf_len); (void) n2rng_generic_write_control(np, post_ctl_ra, unit, post_state); return err; } static u64 advance_polynomial(u64 poly, u64 val, int count) { int i; for (i = 0; i < count; i++) { int highbit_set = ((s64)val < 0); val <<= 1; if (highbit_set) val ^= poly; } return val; } static int n2rng_test_buffer_find(struct n2rng *np, u64 val) { int i, count = 0; /* Purposefully skip over the first word. 
*/ for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) { if (np->test_buffer[i] == val) count++; } return count; } static void n2rng_dump_test_buffer(struct n2rng *np) { int i; for (i = 0; i < SELFTEST_BUFFER_WORDS; i++) dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n", i, np->test_buffer[i]); } static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit) { u64 val = SELFTEST_VAL; int err, matches, limit; matches = 0; for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) { matches += n2rng_test_buffer_find(np, val); if (matches >= SELFTEST_MATCH_GOAL) break; val = advance_polynomial(SELFTEST_POLY, val, 1); } err = 0; if (limit >= SELFTEST_LOOPS_MAX) { err = -ENODEV; dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit); n2rng_dump_test_buffer(np); } else dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit); return err; } static int n2rng_control_selftest(struct n2rng *np, unsigned long unit) { int err; np->test_control[0] = (0x2 << RNG_CTL_ASEL_SHIFT); np->test_control[1] = (0x2 << RNG_CTL_ASEL_SHIFT); np->test_control[2] = (0x2 << RNG_CTL_ASEL_SHIFT); np->test_control[3] = ((0x2 << RNG_CTL_ASEL_SHIFT) | RNG_CTL_LFSR | ((SELFTEST_TICKS - 2) << RNG_CTL_WAIT_SHIFT)); err = n2rng_entropy_diag_read(np, unit, np->test_control, HV_RNG_STATE_HEALTHCHECK, np->test_buffer, sizeof(np->test_buffer), &np->units[unit].control[0], np->hv_state); if (err) return err; return n2rng_check_selftest_buffer(np, unit); } static int n2rng_control_check(struct n2rng *np) { int i; for (i = 0; i < np->num_units; i++) { int err = n2rng_control_selftest(np, i); if (err) return err; } return 0; } /* The sanity checks passed, install the final configuration into the * chip, it's ready to use. 
*/ static int n2rng_control_configure_units(struct n2rng *np) { int unit, err; err = 0; for (unit = 0; unit < np->num_units; unit++) { struct n2rng_unit *up = &np->units[unit]; unsigned long ctl_ra = __pa(&up->control[0]); int esrc; u64 base; base = ((np->accum_cycles << RNG_CTL_WAIT_SHIFT) | (2 << RNG_CTL_ASEL_SHIFT) | RNG_CTL_LFSR); /* XXX This isn't the best. We should fetch a bunch * XXX of words using each entropy source combined XXX * with each VCO setting, and see which combinations * XXX give the best random data. */ for (esrc = 0; esrc < 3; esrc++) up->control[esrc] = base | (esrc << RNG_CTL_VCO_SHIFT) | (RNG_CTL_ES1 << esrc); up->control[3] = base | (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3); err = n2rng_generic_write_control(np, ctl_ra, unit, HV_RNG_STATE_CONFIGURED); if (err) break; } return err; } static void n2rng_work(struct work_struct *work) { struct n2rng *np = container_of(work, struct n2rng, work.work); int err = 0; if (!(np->flags & N2RNG_FLAG_CONTROL)) { err = n2rng_guest_check(np); } else { preempt_disable(); err = n2rng_control_check(np); preempt_enable(); if (!err) err = n2rng_control_configure_units(np); } if (!err) { np->flags |= N2RNG_FLAG_READY; dev_info(&np->op->dev, "RNG ready\n"); } if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN)) schedule_delayed_work(&np->work, HZ * 2); } static void __devinit n2rng_driver_version(void) { static int n2rng_version_printed; if (n2rng_version_printed++ == 0) pr_info("%s", version); } static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { const struct of_device_id *match; int multi_capable; int err = -ENOMEM; struct n2rng *np; match = of_match_device(n2rng_match, &op->dev); if (!match) return -EINVAL; multi_capable = (match->data != NULL); n2rng_driver_version(); np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) goto out; np->op = op; INIT_DELAYED_WORK(&np->work, n2rng_work); if (multi_capable) np->flags |= N2RNG_FLAG_MULTI; err = -ENODEV; 
np->hvapi_major = 2; if (sun4v_hvapi_register(HV_GRP_RNG, np->hvapi_major, &np->hvapi_minor)) { np->hvapi_major = 1; if (sun4v_hvapi_register(HV_GRP_RNG, np->hvapi_major, &np->hvapi_minor)) { dev_err(&op->dev, "Cannot register suitable " "HVAPI version.\n"); goto out_free; } } if (np->flags & N2RNG_FLAG_MULTI) { if (np->hvapi_major < 2) { dev_err(&op->dev, "multi-unit-capable RNG requires " "HVAPI major version 2 or later, got %lu\n", np->hvapi_major); goto out_hvapi_unregister; } np->num_units = of_getintprop_default(op->dev.of_node, "rng-#units", 0); if (!np->num_units) { dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); goto out_hvapi_unregister; } } else np->num_units = 1; dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n", np->hvapi_major, np->hvapi_minor); np->units = kzalloc(sizeof(struct n2rng_unit) * np->num_units, GFP_KERNEL); err = -ENOMEM; if (!np->units) goto out_hvapi_unregister; err = n2rng_init_control(np); if (err) goto out_free_units; dev_info(&op->dev, "Found %s RNG, units: %d\n", ((np->flags & N2RNG_FLAG_MULTI) ? 
"multi-unit-capable" : "single-unit"), np->num_units); np->hwrng.name = "n2rng"; np->hwrng.data_read = n2rng_data_read; np->hwrng.priv = (unsigned long) np; err = hwrng_register(&np->hwrng); if (err) goto out_free_units; dev_set_drvdata(&op->dev, np); schedule_delayed_work(&np->work, 0); return 0; out_free_units: kfree(np->units); np->units = NULL; out_hvapi_unregister: sun4v_hvapi_unregister(HV_GRP_RNG); out_free: kfree(np); out: return err; } static int __devexit n2rng_remove(struct platform_device *op) { struct n2rng *np = dev_get_drvdata(&op->dev); np->flags |= N2RNG_FLAG_SHUTDOWN; cancel_delayed_work_sync(&np->work); hwrng_unregister(&np->hwrng); sun4v_hvapi_unregister(HV_GRP_RNG); kfree(np->units); np->units = NULL; kfree(np); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id n2rng_match[] = { { .name = "random-number-generator", .compatible = "SUNW,n2-rng", }, { .name = "random-number-generator", .compatible = "SUNW,vf-rng", .data = (void *) 1, }, { .name = "random-number-generator", .compatible = "SUNW,kt-rng", .data = (void *) 1, }, {}, }; MODULE_DEVICE_TABLE(of, n2rng_match); static struct platform_driver n2rng_driver = { .driver = { .name = "n2rng", .owner = THIS_MODULE, .of_match_table = n2rng_match, }, .probe = n2rng_probe, .remove = __devexit_p(n2rng_remove), }; module_platform_driver(n2rng_driver);
gpl-2.0
su-haris/falcon_kernel
arch/m32r/platforms/m32700ut/setup.c
8900
11775
/* * linux/arch/m32r/platforms/m32700ut/setup.c * * Setup routines for Renesas M32700UT Board * * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Takeo Takahashi * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <asm/m32r.h> #include <asm/io.h> /* * M32700 Interrupt Control Unit (Level 1) */ #define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; static void disable_m32700ut_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7; outl(data, port); } static void enable_m32700ut_irq(unsigned int irq) { unsigned long port, data; port = irq2port(irq); data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6; outl(data, port); } static void mask_m32700ut(struct irq_data *data) { disable_m32700ut_irq(data->irq); } static void unmask_m32700ut(struct irq_data *data) { enable_m32700ut_irq(data->irq); } static void shutdown_m32700ut(struct irq_data *data) { unsigned long port; port = irq2port(data->irq); outl(M32R_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_irq_type = { .name = "M32700UT-IRQ", .irq_shutdown = shutdown_m32700ut, .irq_mask = mask_m32700ut, .irq_unmask = unmask_m32700ut }; /* * Interrupt Control Unit of PLD on M32700UT (Level 2) */ #define irq2pldirq(x) ((x) - M32700UT_PLD_IRQ_BASE) #define pldirq2port(x) (unsigned long)((int)PLD_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) typedef struct { unsigned short icucr; /* ICU Control Register */ } pld_icu_data_t; static pld_icu_data_t pld_icu_data[M32700UT_NUM_PLD_IRQ]; static void disable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); port = 
pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_pld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2pldirq(irq); port = pldirq2port(pldirq); data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_m32700ut_pld(struct irq_data *data) { disable_m32700ut_pld_irq(data->irq); } static void unmask_m32700ut_pld(struct irq_data *data) { enable_m32700ut_pld_irq(data->irq); enable_m32700ut_irq(M32R_IRQ_INT1); } static void shutdown_m32700ut_pld_irq(struct irq_data *data) { unsigned long port; unsigned int pldirq; pldirq = irq2pldirq(data->irq); port = pldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_pld_irq_type = { .name = "M32700UT-PLD-IRQ", .irq_shutdown = shutdown_m32700ut_pld_irq, .irq_mask = mask_m32700ut_pld, .irq_unmask = unmask_m32700ut_pld, }; /* * Interrupt Control Unit of PLD on M32700UT-LAN (Level 2) */ #define irq2lanpldirq(x) ((x) - M32700UT_LAN_PLD_IRQ_BASE) #define lanpldirq2port(x) (unsigned long)((int)M32700UT_LAN_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) static pld_icu_data_t lanpld_icu_data[M32700UT_NUM_LAN_PLD_IRQ]; static void disable_m32700ut_lanpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lanpldirq(irq); port = lanpldirq2port(pldirq); data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_lanpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lanpldirq(irq); port = lanpldirq2port(pldirq); data = lanpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_m32700ut_lanpld(struct irq_data *data) { disable_m32700ut_lanpld_irq(data->irq); } static void unmask_m32700ut_lanpld(struct irq_data *data) { enable_m32700ut_lanpld_irq(data->irq); enable_m32700ut_irq(M32R_IRQ_INT0); } static void 
shutdown_m32700ut_lanpld(struct irq_data *data) { unsigned long port; unsigned int pldirq; pldirq = irq2lanpldirq(data->irq); port = lanpldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_lanpld_irq_type = { .name = "M32700UT-PLD-LAN-IRQ", .irq_shutdown = shutdown_m32700ut_lanpld, .irq_mask = mask_m32700ut_lanpld, .irq_unmask = unmask_m32700ut_lanpld, }; /* * Interrupt Control Unit of PLD on M32700UT-LCD (Level 2) */ #define irq2lcdpldirq(x) ((x) - M32700UT_LCD_PLD_IRQ_BASE) #define lcdpldirq2port(x) (unsigned long)((int)M32700UT_LCD_ICUCR1 + \ (((x) - 1) * sizeof(unsigned short))) static pld_icu_data_t lcdpld_icu_data[M32700UT_NUM_LCD_PLD_IRQ]; static void disable_m32700ut_lcdpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lcdpldirq(irq); port = lcdpldirq2port(pldirq); data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7; outw(data, port); } static void enable_m32700ut_lcdpld_irq(unsigned int irq) { unsigned long port, data; unsigned int pldirq; pldirq = irq2lcdpldirq(irq); port = lcdpldirq2port(pldirq); data = lcdpld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6; outw(data, port); } static void mask_m32700ut_lcdpld(struct irq_data *data) { disable_m32700ut_lcdpld_irq(data->irq); } static void unmask_m32700ut_lcdpld(struct irq_data *data) { enable_m32700ut_lcdpld_irq(data->irq); enable_m32700ut_irq(M32R_IRQ_INT2); } static void shutdown_m32700ut_lcdpld(struct irq_data *data) { unsigned long port; unsigned int pldirq; pldirq = irq2lcdpldirq(data->irq); port = lcdpldirq2port(pldirq); outw(PLD_ICUCR_ILEVEL7, port); } static struct irq_chip m32700ut_lcdpld_irq_type = { .name = "M32700UT-PLD-LCD-IRQ", .irq_shutdown = shutdown_m32700ut_lcdpld, .irq_mask = mask_m32700ut_lcdpld, .irq_unmask = unmask_m32700ut_lcdpld, }; void __init init_IRQ(void) { #if defined(CONFIG_SMC91X) /* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/ irq_set_chip_and_handler(M32700UT_LAN_IRQ_LAN, 
&m32700ut_lanpld_irq_type, handle_level_irq); lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* "H" edge sense */ disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN); #endif /* CONFIG_SMC91X */ /* MFT2 : system timer */ irq_set_chip_and_handler(M32R_IRQ_MFT2, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN; disable_m32700ut_irq(M32R_IRQ_MFT2); /* SIO0 : receive */ irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_R].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO0_R); /* SIO0 : send */ irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO0_S].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO0_S); /* SIO1 : receive */ irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_R].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO1_R); /* SIO1 : send */ irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_SIO1_S].icucr = 0; disable_m32700ut_irq(M32R_IRQ_SIO1_S); /* DMA1 : */ irq_set_chip_and_handler(M32R_IRQ_DMA1, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_DMA1].icucr = 0; disable_m32700ut_irq(M32R_IRQ_DMA1); #ifdef CONFIG_SERIAL_M32R_PLDSIO /* INT#1: SIO0 Receive on PLD */ irq_set_chip_and_handler(PLD_IRQ_SIO0_RCV, &m32700ut_pld_irq_type, handle_level_irq); pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03; disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV); /* INT#1: SIO0 Send on PLD */ irq_set_chip_and_handler(PLD_IRQ_SIO0_SND, &m32700ut_pld_irq_type, handle_level_irq); pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03; disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND); #endif /* CONFIG_SERIAL_M32R_PLDSIO */ /* INT#1: CFC IREQ on PLD */ irq_set_chip_and_handler(PLD_IRQ_CFIREQ, &m32700ut_pld_irq_type, handle_level_irq); 
pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* 'L' level sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ); /* INT#1: CFC Insert on PLD */ irq_set_chip_and_handler(PLD_IRQ_CFC_INSERT, &m32700ut_pld_irq_type, handle_level_irq); pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00; /* 'L' edge sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT); /* INT#1: CFC Eject on PLD */ irq_set_chip_and_handler(PLD_IRQ_CFC_EJECT, &m32700ut_pld_irq_type, handle_level_irq); pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02; /* 'H' edge sense */ disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT); /* * INT0# is used for LAN, DIO * We enable it here. */ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11; enable_m32700ut_irq(M32R_IRQ_INT0); /* * INT1# is used for UART, MMC, CF Controller in FPGA. * We enable it here. */ icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11; enable_m32700ut_irq(M32R_IRQ_INT1); #if defined(CONFIG_USB) outw(USBCR_OTGS, USBCR); /* USBCR: non-OTG */ irq_set_chip_and_handler(M32700UT_LCD_IRQ_USB_INT1, &m32700ut_lcdpld_irq_type, handle_level_irq); lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01; /* "L" level sense */ disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1); #endif /* * INT2# is used for BAT, USB, AUDIO * We enable it here. 
*/ icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01; enable_m32700ut_irq(M32R_IRQ_INT2); #if defined(CONFIG_VIDEO_M32R_AR) /* * INT3# is used for AR */ irq_set_chip_and_handler(M32R_IRQ_INT3, &m32700ut_irq_type, handle_level_irq); icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10; disable_m32700ut_irq(M32R_IRQ_INT3); #endif /* CONFIG_VIDEO_M32R_AR */ } #if defined(CONFIG_SMC91X) #define LAN_IOSTART 0x300 #define LAN_IOEND 0x320 static struct resource smc91x_resources[] = { [0] = { .start = (LAN_IOSTART), .end = (LAN_IOEND), .flags = IORESOURCE_MEM, }, [1] = { .start = M32700UT_LAN_IRQ_LAN, .end = M32700UT_LAN_IRQ_LAN, .flags = IORESOURCE_IRQ, } }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; #endif #if defined(CONFIG_FB_S1D13XXX) #include <video/s1d13xxxfb.h> #include <asm/s1d13806.h> static struct s1d13xxxfb_pdata s1d13xxxfb_data = { .initregs = s1d13xxxfb_initregs, .initregssize = ARRAY_SIZE(s1d13xxxfb_initregs), .platform_init_video = NULL, #ifdef CONFIG_PM .platform_suspend_video = NULL, .platform_resume_video = NULL, #endif }; static struct resource s1d13xxxfb_resources[] = { [0] = { .start = 0x10600000UL, .end = 0x1073FFFFUL, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x10400000UL, .end = 0x104001FFUL, .flags = IORESOURCE_MEM, } }; static struct platform_device s1d13xxxfb_device = { .name = S1D_DEVICENAME, .id = 0, .dev = { .platform_data = &s1d13xxxfb_data, }, .num_resources = ARRAY_SIZE(s1d13xxxfb_resources), .resource = s1d13xxxfb_resources, }; #endif static int __init platform_init(void) { #if defined(CONFIG_SMC91X) platform_device_register(&smc91x_device); #endif #if defined(CONFIG_FB_S1D13XXX) platform_device_register(&s1d13xxxfb_device); #endif return 0; } arch_initcall(platform_init);
gpl-2.0
ssumpf/l4linux
net/bridge/netfilter/ebt_mark_m.c
14020
2361
/* * ebt_mark_m * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * July, 2002 * */ #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_mark_m.h> static bool ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_mark_m_info *info = par->matchinfo; if (info->bitmask & EBT_MARK_OR) return !!(skb->mark & info->mask) ^ info->invert; return ((skb->mark & info->mask) == info->mark) ^ info->invert; } static int ebt_mark_mt_check(const struct xt_mtchk_param *par) { const struct ebt_mark_m_info *info = par->matchinfo; if (info->bitmask & ~EBT_MARK_MASK) return -EINVAL; if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND)) return -EINVAL; if (!info->bitmask) return -EINVAL; return 0; } #ifdef CONFIG_COMPAT struct compat_ebt_mark_m_info { compat_ulong_t mark, mask; uint8_t invert, bitmask; }; static void mark_mt_compat_from_user(void *dst, const void *src) { const struct compat_ebt_mark_m_info *user = src; struct ebt_mark_m_info *kern = dst; kern->mark = user->mark; kern->mask = user->mask; kern->invert = user->invert; kern->bitmask = user->bitmask; } static int mark_mt_compat_to_user(void __user *dst, const void *src) { struct compat_ebt_mark_m_info __user *user = dst; const struct ebt_mark_m_info *kern = src; if (put_user(kern->mark, &user->mark) || put_user(kern->mask, &user->mask) || put_user(kern->invert, &user->invert) || put_user(kern->bitmask, &user->bitmask)) return -EFAULT; return 0; } #endif static struct xt_match ebt_mark_mt_reg __read_mostly = { .name = "mark_m", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_mark_mt, .checkentry = ebt_mark_mt_check, .matchsize = sizeof(struct ebt_mark_m_info), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct compat_ebt_mark_m_info), .compat_from_user = mark_mt_compat_from_user, .compat_to_user = mark_mt_compat_to_user, #endif .me = THIS_MODULE, }; static int __init 
ebt_mark_m_init(void) { return xt_register_match(&ebt_mark_mt_reg); } static void __exit ebt_mark_m_fini(void) { xt_unregister_match(&ebt_mark_mt_reg); } module_init(ebt_mark_m_init); module_exit(ebt_mark_m_fini); MODULE_DESCRIPTION("Ebtables: Packet mark match"); MODULE_LICENSE("GPL");
gpl-2.0
ISTweak/android_kernel_sharp_a01
fs/fuse/file.c
197
52225
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/compat.h> static const struct file_operations fuse_direct_io_file_operations; static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, int opcode, struct fuse_open_out *outargp) { struct fuse_open_in inarg; struct fuse_req *req; int err; req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); memset(&inarg, 0, sizeof(inarg)); inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); if (!fc->atomic_o_trunc) inarg.flags &= ~O_TRUNC; req->in.h.opcode = opcode; req->in.h.nodeid = nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->out.numargs = 1; req->out.args[0].size = sizeof(*outargp); req->out.args[0].value = outargp; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); return err; } struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) { struct fuse_file *ff; ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); if (unlikely(!ff)) return NULL; ff->fc = fc; ff->reserved_req = fuse_request_alloc(); if (unlikely(!ff->reserved_req)) { kfree(ff); return NULL; } INIT_LIST_HEAD(&ff->write_entry); atomic_set(&ff->count, 0); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); spin_lock(&fc->lock); ff->kh = ++fc->khctr; spin_unlock(&fc->lock); return ff; } void fuse_file_free(struct fuse_file *ff) { fuse_request_free(ff->reserved_req); kfree(ff); } struct fuse_file *fuse_file_get(struct fuse_file *ff) { atomic_inc(&ff->count); return ff; } static void fuse_release_async(struct work_struct *work) { struct fuse_req *req; struct fuse_conn *fc; struct path path; req = container_of(work, struct fuse_req, 
misc.release.work); path = req->misc.release.path; fc = get_fuse_conn(path.dentry->d_inode); fuse_put_request(fc, req); path_put(&path); } static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { if (fc->destroy_req) { /* * If this is a fuseblk mount, then it's possible that * releasing the path will result in releasing the * super block and sending the DESTROY request. If * the server is single threaded, this would hang. * For this reason do the path_put() in a separate * thread. */ atomic_inc(&req->count); INIT_WORK(&req->misc.release.work, fuse_release_async); schedule_work(&req->misc.release.work); } else { path_put(&req->misc.release.path); } } static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; if (sync) { fuse_request_send(ff->fc, req); path_put(&req->misc.release.path); fuse_put_request(ff->fc, req); } else { req->end = fuse_release_end; fuse_request_send_background(ff->fc, req); } kfree(ff); } } int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, bool isdir) { struct fuse_open_out outarg; struct fuse_file *ff; int err; int opcode = isdir ? 
FUSE_OPENDIR : FUSE_OPEN; ff = fuse_file_alloc(fc); if (!ff) return -ENOMEM; err = fuse_send_open(fc, nodeid, file, opcode, &outarg); if (err) { fuse_file_free(ff); return err; } if (isdir) outarg.open_flags &= ~FOPEN_DIRECT_IO; ff->fh = outarg.fh; ff->nodeid = nodeid; ff->open_flags = outarg.open_flags; file->private_data = fuse_file_get(ff); return 0; } EXPORT_SYMBOL_GPL(fuse_do_open); void fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); if (ff->open_flags & FOPEN_DIRECT_IO) file->f_op = &fuse_direct_io_file_operations; if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fc->lock); fi->attr_version = ++fc->attr_version; i_size_write(inode, 0); spin_unlock(&fc->lock); fuse_invalidate_attr(inode); } } int fuse_open_common(struct inode *inode, struct file *file, bool isdir) { struct fuse_conn *fc = get_fuse_conn(inode); int err; /* VFS checks this, but only _after_ ->open() */ if (file->f_flags & O_DIRECT) return -EINVAL; err = generic_file_open(inode, file); if (err) return err; err = fuse_do_open(fc, get_node_id(inode), file, isdir); if (err) return err; fuse_finish_open(inode, file); return 0; } static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) { struct fuse_conn *fc = ff->fc; struct fuse_req *req = ff->reserved_req; struct fuse_release_in *inarg = &req->misc.release.in; spin_lock(&fc->lock); list_del(&ff->write_entry); if (!RB_EMPTY_NODE(&ff->polled_node)) rb_erase(&ff->polled_node, &fc->polled_files); spin_unlock(&fc->lock); wake_up_interruptible_sync(&ff->poll_wait); inarg->fh = ff->fh; inarg->flags = flags; req->in.h.opcode = opcode; req->in.h.nodeid = ff->nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(struct 
fuse_release_in); req->in.args[0].value = inarg; } void fuse_release_common(struct file *file, int opcode) { struct fuse_file *ff; struct fuse_req *req; ff = file->private_data; if (unlikely(!ff)) return; req = ff->reserved_req; fuse_prepare_release(ff, file->f_flags, opcode); /* Hold vfsmount and dentry until release is finished */ path_get(&file->f_path); req->misc.release.path = file->f_path; /* * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. * * Make the release synchronous if this is a fuseblk mount, * synchronous RELEASE is allowed (and desirable) in this case * because the server can be trusted not to screw up. */ fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) { return fuse_open_common(inode, file, false); } static int fuse_release(struct inode *inode, struct file *file) { fuse_release_common(file, FUSE_RELEASE); /* return value is ignored by VFS */ return 0; } void fuse_sync_release(struct fuse_file *ff, int flags) { WARN_ON(atomic_read(&ff->count) > 1); fuse_prepare_release(ff, flags, FUSE_RELEASE); ff->reserved_req->force = 1; fuse_request_send(ff->fc, ff->reserved_req); fuse_put_request(ff->fc, ff->reserved_req); kfree(ff); } EXPORT_SYMBOL_GPL(fuse_sync_release); /* * Scramble the ID space with XTEA, so that the value of the files_struct * pointer is not exposed to userspace. */ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) { u32 *k = fc->scramble_key; u64 v = (unsigned long) id; u32 v0 = v; u32 v1 = v >> 32; u32 sum = 0; int i; for (i = 0; i < 32; i++) { v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); sum += 0x9E3779B9; v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); } return (u64) v0 + ((u64) v1 << 32); } /* * Check if page is under writeback * * This is currently done by walking the list of writepage requests * for the inode, which can be pretty inefficient. 
*/ static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_req *req; bool found = false; spin_lock(&fc->lock); list_for_each_entry(req, &fi->writepages, writepages_entry) { pgoff_t curr_index; BUG_ON(req->inode != inode); curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; if (curr_index == index) { found = true; break; } } spin_unlock(&fc->lock); return found; } /* * Wait for page writeback to be completed. * * Since fuse doesn't rely on the VM writeback tracking, this has to * use some other means. */ static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) { struct fuse_inode *fi = get_fuse_inode(inode); wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); return 0; } static int fuse_flush(struct file *file, fl_owner_t id) { struct inode *inode = file->f_path.dentry->d_inode; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_file *ff = file->private_data; struct fuse_req *req; struct fuse_flush_in inarg; int err; if (is_bad_inode(inode)) return -EIO; if (fc->no_flush) return 0; req = fuse_get_req_nofail(fc, file); memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; inarg.lock_owner = fuse_lock_owner_id(fc, id); req->in.h.opcode = FUSE_FLUSH; req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->force = 1; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); if (err == -ENOSYS) { fc->no_flush = 1; err = 0; } return err; } /* * Wait for all pending writepages on the inode to finish. * * This is currently done by blocking further writes with FUSE_NOWRITE * and waiting for all sent writes to complete. * * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage * could conflict with truncation. 
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

/* Flush dirty pages and send FSYNC/FSYNCDIR; common to files and dirs. */
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		/* remember the opcode is unsupported; treat as success */
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}

/* Fill in a READ-style request for @count bytes at @pos. */
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	/* reply size is variable, bounded by count */
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

/* Send a synchronous READ; returns the number of bytes the server sent. */
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff =
file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

/* Shrink cached i_size after a short read, if attributes are unchanged. */
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	/* only update if no attribute change raced with the read */
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

/* ->readpage(): synchronously read one page from the server. */
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.
If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

/* Completion for batched readpages: fix up size on EOF, unlock all pages. */
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	/* find the first page still attached to a mapping */
	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}

		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

/* Send one READ covering all pages queued in @req; async when supported. */
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

/* State threaded through read_cache_pages() by fuse_readpages(). */
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

/* Per-page callback: batch contiguous pages into one request. */
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct
fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	/* flush the current batch if it is full or @page is not contiguous */
	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

/* ->readpages(): read ahead by batching contiguous pages into big READs. */
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		/* send any leftover partial batch */
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

/* Fill in a WRITE request header for @count bytes at @pos. */
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	/* pre-9 protocol minors use a shorter fuse_write_in layout */
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

/* Send a synchronous WRITE; returns bytes the server claims to have written. */
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

/* Extend cached i_size after a successful write past EOF. */
static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

/* Write one (possibly partial) page synchronously to the server. */
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct
page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	/* a zero-byte "success" from the server is treated as an error */
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

/* Send the pages queued in @req as one WRITE, then unlock/release them. */
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		/* only fully-written, aligned pages become uptodate */
		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

/* Copy user data into fresh page-cache pages and queue them in @req. */
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
			       struct address_space *mapping,
			       struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos &
(PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		/* fault the source pages in BEFORE locking the target page */
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/* atomic copy: page faults are disabled while page is locked */
		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			/* copy faulted: drop the page and retry after fault-in */
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ?
count : err;
}

/* Write the whole iov to the server in max_write-sized batches. */
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

/* ->aio_write(): checks, suid-strip, mtime update, then perform the write. */
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ?
written : err;
}

/* Drop pages pinned by fuse_get_user_pages(); mark them dirty after a read. */
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

/* Pin the user buffer's pages into @req; may shrink *nbytesp to what fits. */
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	/* limit the transfer to the bytes actually covered by pinned pages */
	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

/* Direct (uncached) I/O loop shared by direct read and write paths. */
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ?
fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			/* server returned more than requested: protocol error */
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		/* short transfer means EOF or server-side limit: stop */
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				     size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}

/* Free the temporary page and file reference held by a writepage request. */
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}

/* Unaccount a completed writepage request and wake waiters. */
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode
*fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	/* clamp the write size against the current i_size */
	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	/* freeing may sleep/reenter; must be done outside fc->lock */
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

/* Background-request completion for a single writepage. */
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

/*
 * Copy @page into a temporary page and queue an asynchronous WRITE,
 * so writeback completes immediately from the VM's point of view.
 */
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	/* data is safely copied to tmp_page; original page is done */
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

/* ->launder_page(): write the page and wait for its writeback to finish. */
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

/* Translate a lock description from the FUSE wire format to a file_lock. */
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		/* reject out-of-range or inverted lock ranges from the server */
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

/* Fill in a GETLK/SETLK/SETLKW request from @fl. */
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

/* Query the server for a conflicting lock (F_GETLK). */
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

/* Set (or clear) a lock on the server; @flock selects BSD-flock semantics. */
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ?
current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

/* ->lock(): POSIX locks, falling back to local locks if unsupported. */
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

/* ->flock(): BSD locks, falling back to local locks if unsupported. */
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}
	return err;
}

/* ->bmap(): ask the server for the block mapping (fuseblk only). */
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size =
sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); if (err == -ENOSYS) fc->no_bmap = 1; return err ? 0 : outarg.block; } static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin) { loff_t retval; struct inode *inode = file->f_path.dentry->d_inode; mutex_lock(&inode->i_mutex); switch (origin) { case SEEK_END: retval = fuse_update_attributes(inode, NULL, file, NULL); if (retval) goto exit; offset += i_size_read(inode); break; case SEEK_CUR: offset += file->f_pos; } retval = -EINVAL; if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { if (offset != file->f_pos) { file->f_pos = offset; file->f_version = 0; } retval = offset; } exit: mutex_unlock(&inode->i_mutex); return retval; } static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, unsigned int nr_segs, size_t bytes, bool to_user) { struct iov_iter ii; int page_idx = 0; if (!bytes) return 0; iov_iter_init(&ii, iov, nr_segs, bytes, 0); while (iov_iter_count(&ii)) { struct page *page = pages[page_idx++]; size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); void *kaddr; kaddr = kmap(page); while (todo) { char __user *uaddr = ii.iov->iov_base + ii.iov_offset; size_t iov_len = ii.iov->iov_len - ii.iov_offset; size_t copy = min(todo, iov_len); size_t left; if (!to_user) left = copy_from_user(kaddr, uaddr, copy); else left = copy_to_user(uaddr, kaddr, copy); if (unlikely(left)) return -EFAULT; iov_iter_advance(&ii, copy); todo -= copy; kaddr += copy; } kunmap(page); } return 0; } /* Make sure iov_length() won't overflow */ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) { size_t n; u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; for (n = 0; n < count; n++) { if (iov->iov_len > (size_t) max) return -ENOMEM; max -= iov->iov_len; } return 0; } /* * CUSE servers compiled on 32bit broke on 64bit kernels because the * ABI was defined to be 'struct iovec' which is different on 32bit * and 
64bit. Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
				 size_t transferred, unsigned count,
				 bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	/* native layout: the reply size must match exactly */
	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be NULL; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by client always fits in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
*/ if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { struct iovec *iov = page_address(iov_page); iov->iov_base = (void __user *)arg; iov->iov_len = _IOC_SIZE(cmd); if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; in_iovs = 1; } if (_IOC_DIR(cmd) & _IOC_READ) { out_iov = iov; out_iovs = 1; } } retry: inarg.in_size = in_size = iov_length(in_iov, in_iovs); inarg.out_size = out_size = iov_length(out_iov, out_iovs); /* * Out data can be used either for actual out data or iovs, * make sure there always is at least one page. */ out_size = max_t(size_t, out_size, PAGE_SIZE); max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); /* make sure there are enough buffer pages and init request with them */ err = -ENOMEM; if (max_pages > FUSE_MAX_PAGES_PER_REQ) goto out; while (num_pages < max_pages) { pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!pages[num_pages]) goto out; num_pages++; } req = fuse_get_req(fc); if (IS_ERR(req)) { err = PTR_ERR(req); req = NULL; goto out; } memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); req->num_pages = num_pages; /* okay, let's send it to the client */ req->in.h.opcode = FUSE_IOCTL; req->in.h.nodeid = ff->nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; if (in_size) { req->in.numargs++; req->in.args[1].size = in_size; req->in.argpages = 1; err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, false); if (err) goto out; } req->out.numargs = 2; req->out.args[0].size = sizeof(outarg); req->out.args[0].value = &outarg; req->out.args[1].size = out_size; req->out.argpages = 1; req->out.argvar = 1; fuse_request_send(fc, req); err = req->out.h.error; transferred = req->out.args[1].size; fuse_put_request(fc, req); req = NULL; if (err) goto out; /* did it ask for retry? 
*/ if (outarg.flags & FUSE_IOCTL_RETRY) { char *vaddr; /* no retry if in restricted mode */ err = -EIO; if (!(flags & FUSE_IOCTL_UNRESTRICTED)) goto out; in_iovs = outarg.in_iovs; out_iovs = outarg.out_iovs; /* * Make sure things are in boundary, separate checks * are to protect against overflow. */ err = -ENOMEM; if (in_iovs > FUSE_IOCTL_MAX_IOV || out_iovs > FUSE_IOCTL_MAX_IOV || in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; vaddr = kmap_atomic(pages[0], KM_USER0); err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_atomic(vaddr, KM_USER0); if (err) goto out; in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; err = fuse_verify_ioctl_iov(in_iov, in_iovs); if (err) goto out; err = fuse_verify_ioctl_iov(out_iov, out_iovs); if (err) goto out; goto retry; } err = -EIO; if (transferred > inarg.out_size) goto out; err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); out: if (req) fuse_put_request(fc, req); if (iov_page) __free_page(iov_page); while (num_pages) __free_page(pages[--num_pages]); kfree(pages); return err ? err : outarg.result; } EXPORT_SYMBOL_GPL(fuse_do_ioctl); static long fuse_file_ioctl_common(struct file *file, unsigned int cmd, unsigned long arg, unsigned int flags) { struct inode *inode = file->f_dentry->d_inode; struct fuse_conn *fc = get_fuse_conn(inode); if (!fuse_allow_task(fc, current)) return -EACCES; if (is_bad_inode(inode)) return -EIO; return fuse_do_ioctl(file, cmd, arg, flags); } static long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return fuse_file_ioctl_common(file, cmd, arg, 0); } static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); } /* * All files which have been polled are linked to RB tree * fuse_conn->polled_files which is indexed by kh. Walk the tree and * find the matching one. 
*/ static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, struct rb_node **parent_out) { struct rb_node **link = &fc->polled_files.rb_node; struct rb_node *last = NULL; while (*link) { struct fuse_file *ff; last = *link; ff = rb_entry(last, struct fuse_file, polled_node); if (kh < ff->kh) link = &last->rb_left; else if (kh > ff->kh) link = &last->rb_right; else return link; } if (parent_out) *parent_out = last; return link; } /* * The file is about to be polled. Make sure it's on the polled_files * RB tree. Note that files once added to the polled_files tree are * not removed before the file is released. This is because a file * polled once is likely to be polled again. */ static void fuse_register_polled_file(struct fuse_conn *fc, struct fuse_file *ff) { spin_lock(&fc->lock); if (RB_EMPTY_NODE(&ff->polled_node)) { struct rb_node **link, *parent; link = fuse_find_polled_node(fc, ff->kh, &parent); BUG_ON(*link); rb_link_node(&ff->polled_node, parent, link); rb_insert_color(&ff->polled_node, &fc->polled_files); } spin_unlock(&fc->lock); } unsigned fuse_file_poll(struct file *file, poll_table *wait) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; struct fuse_poll_out outarg; struct fuse_req *req; int err; if (fc->no_poll) return DEFAULT_POLLMASK; poll_wait(file, &ff->poll_wait, wait); /* * Ask for notification iff there's someone waiting for it. * The client may ignore the flag and always notify. 
*/ if (waitqueue_active(&ff->poll_wait)) { inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; fuse_register_polled_file(fc, ff); } req = fuse_get_req(fc); if (IS_ERR(req)) return POLLERR; req->in.h.opcode = FUSE_POLL; req->in.h.nodeid = ff->nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->out.numargs = 1; req->out.args[0].size = sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); if (!err) return outarg.revents; if (err == -ENOSYS) { fc->no_poll = 1; return DEFAULT_POLLMASK; } return POLLERR; } EXPORT_SYMBOL_GPL(fuse_file_poll); /* * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and * wakes up the poll waiters. */ int fuse_notify_poll_wakeup(struct fuse_conn *fc, struct fuse_notify_poll_wakeup_out *outarg) { u64 kh = outarg->kh; struct rb_node **link; spin_lock(&fc->lock); link = fuse_find_polled_node(fc, kh, NULL); if (*link) { struct fuse_file *ff; ff = rb_entry(*link, struct fuse_file, polled_node); wake_up_interruptible_sync(&ff->poll_wait); } spin_unlock(&fc->lock); return 0; } static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, .read = do_sync_read, .aio_read = fuse_file_aio_read, .write = do_sync_write, .aio_write = fuse_file_aio_write, .mmap = fuse_file_mmap, .open = fuse_open, .flush = fuse_flush, .release = fuse_release, .fsync = fuse_fsync, .lock = fuse_file_lock, .flock = fuse_file_flock, .splice_read = generic_file_splice_read, .unlocked_ioctl = fuse_file_ioctl, .compat_ioctl = fuse_file_compat_ioctl, .poll = fuse_file_poll, }; static const struct file_operations fuse_direct_io_file_operations = { .llseek = fuse_file_llseek, .read = fuse_direct_read, .write = fuse_direct_write, .mmap = fuse_direct_mmap, .open = fuse_open, .flush = fuse_flush, .release = fuse_release, .fsync = fuse_fsync, .lock = fuse_file_lock, .flock = fuse_file_flock, .unlocked_ioctl = fuse_file_ioctl, 
.compat_ioctl = fuse_file_compat_ioctl, .poll = fuse_file_poll, /* no splice_read */ }; static const struct address_space_operations fuse_file_aops = { .readpage = fuse_readpage, .writepage = fuse_writepage, .launder_page = fuse_launder_page, .write_begin = fuse_write_begin, .write_end = fuse_write_end, .readpages = fuse_readpages, .set_page_dirty = __set_page_dirty_nobuffers, .bmap = fuse_bmap, }; void fuse_init_file_inode(struct inode *inode) { inode->i_fop = &fuse_file_operations; inode->i_data.a_ops = &fuse_file_aops; }
gpl-2.0
kylon/Buzz-kernel
arch/arm/mach-omap2/mcbsp.c
453
6889
/*
 * linux/arch/arm/mach-omap2/mcbsp.c
 *
 * Copyright (C) 2008 Instituto Nokia de Tecnologia
 * Contact: Eduardo Valentin <eduardo.valentin@indt.org.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Multichannel mode not supported.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include <mach/irqs.h>
#include <mach/dma.h>
#include <mach/mux.h>
#include <mach/cpu.h>
#include <mach/mcbsp.h>

/* Route the McBSP2 pins (and the GPIO117 ball) through the OMAP2420 mux. */
static void omap2_mcbsp2_mux_setup(void)
{
	omap_cfg_reg(Y15_24XX_MCBSP2_CLKX);
	omap_cfg_reg(R14_24XX_MCBSP2_FSX);
	omap_cfg_reg(W15_24XX_MCBSP2_DR);
	omap_cfg_reg(V15_24XX_MCBSP2_DX);
	omap_cfg_reg(V14_24XX_GPIO117);
	/*
	 * TODO: Need to add MUX settings for OMAP 2430 SDP
	 */
}

/*
 * Per-port "request" hook, invoked by the common McBSP core when a port
 * is claimed.  Only McBSP2 on OMAP2420 needs pin muxing at request time.
 */
static void omap2_mcbsp_request(unsigned int id)
{
	if (cpu_is_omap2420() && (id == OMAP_MCBSP2))
		omap2_mcbsp2_mux_setup();
}

static struct omap_mcbsp_ops omap2_mcbsp_ops = {
	.request	= omap2_mcbsp_request,
};

/* Per-SoC static port descriptions: base address, DMA request lines, IRQs. */
#ifdef CONFIG_ARCH_OMAP2420
static struct omap_mcbsp_platform_data omap2420_mcbsp_pdata[] = {
	{
		.phys_base	= OMAP24XX_MCBSP1_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
		.rx_irq		= INT_24XX_MCBSP1_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP1_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
	{
		.phys_base	= OMAP24XX_MCBSP2_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
		.rx_irq		= INT_24XX_MCBSP2_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP2_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
};
#define OMAP2420_MCBSP_PDATA_SZ		ARRAY_SIZE(omap2420_mcbsp_pdata)
#else
#define omap2420_mcbsp_pdata		NULL
#define OMAP2420_MCBSP_PDATA_SZ		0
#endif

#ifdef CONFIG_ARCH_OMAP2430
static struct omap_mcbsp_platform_data omap2430_mcbsp_pdata[] = {
	{
		.phys_base	= OMAP24XX_MCBSP1_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
		.rx_irq		= INT_24XX_MCBSP1_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP1_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
	{
		.phys_base	= OMAP24XX_MCBSP2_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
		.rx_irq		= INT_24XX_MCBSP2_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP2_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
	{
		.phys_base	= OMAP2430_MCBSP3_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP3_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP3_TX,
		.rx_irq		= INT_24XX_MCBSP3_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP3_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
	{
		.phys_base	= OMAP2430_MCBSP4_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP4_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP4_TX,
		.rx_irq		= INT_24XX_MCBSP4_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP4_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
	{
		.phys_base	= OMAP2430_MCBSP5_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP5_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP5_TX,
		.rx_irq		= INT_24XX_MCBSP5_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP5_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
	},
};
#define OMAP2430_MCBSP_PDATA_SZ		ARRAY_SIZE(omap2430_mcbsp_pdata)
#else
#define omap2430_mcbsp_pdata		NULL
#define OMAP2430_MCBSP_PDATA_SZ		0
#endif

#ifdef CONFIG_ARCH_OMAP34XX
/* 34xx ports additionally have an internal FIFO (.buffer_size, in words). */
static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
	{
		.phys_base	= OMAP34XX_MCBSP1_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
		.rx_irq		= INT_24XX_MCBSP1_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP1_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
		.buffer_size	= 0x6F,
	},
	{
		.phys_base	= OMAP34XX_MCBSP2_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
		.rx_irq		= INT_24XX_MCBSP2_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP2_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
		.buffer_size	= 0x3FF,
	},
	{
		.phys_base	= OMAP34XX_MCBSP3_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP3_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP3_TX,
		.rx_irq		= INT_24XX_MCBSP3_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP3_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
		.buffer_size	= 0x6F,
	},
	{
		.phys_base	= OMAP34XX_MCBSP4_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP4_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP4_TX,
		.rx_irq		= INT_24XX_MCBSP4_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP4_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
		.buffer_size	= 0x6F,
	},
	{
		.phys_base	= OMAP34XX_MCBSP5_BASE,
		.dma_rx_sync	= OMAP24XX_DMA_MCBSP5_RX,
		.dma_tx_sync	= OMAP24XX_DMA_MCBSP5_TX,
		.rx_irq		= INT_24XX_MCBSP5_IRQ_RX,
		.tx_irq		= INT_24XX_MCBSP5_IRQ_TX,
		.ops		= &omap2_mcbsp_ops,
		.buffer_size	= 0x6F,
	},
};
#define OMAP34XX_MCBSP_PDATA_SZ		ARRAY_SIZE(omap34xx_mcbsp_pdata)
#else
#define omap34xx_mcbsp_pdata		NULL
#define OMAP34XX_MCBSP_PDATA_SZ		0
#endif

/* NOTE(review): unlike the other SoC tables, this one is not guarded by a
 * CONFIG_ARCH_OMAP4 ifdef, so it is always built in — confirm intentional. */
static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
	{
		.phys_base      = OMAP44XX_MCBSP1_BASE,
		.dma_rx_sync    = OMAP44XX_DMA_MCBSP1_RX,
		.dma_tx_sync    = OMAP44XX_DMA_MCBSP1_TX,
		.rx_irq         = INT_24XX_MCBSP1_IRQ_RX,
		.tx_irq         = INT_24XX_MCBSP1_IRQ_TX,
		.ops            = &omap2_mcbsp_ops,
	},
	{
		.phys_base      = OMAP44XX_MCBSP2_BASE,
		.dma_rx_sync    = OMAP44XX_DMA_MCBSP2_RX,
		.dma_tx_sync    = OMAP44XX_DMA_MCBSP2_TX,
		.rx_irq         = INT_24XX_MCBSP2_IRQ_RX,
		.tx_irq         = INT_24XX_MCBSP2_IRQ_TX,
		.ops            = &omap2_mcbsp_ops,
	},
	{
		.phys_base      = OMAP44XX_MCBSP3_BASE,
		.dma_rx_sync    = OMAP44XX_DMA_MCBSP3_RX,
		.dma_tx_sync    = OMAP44XX_DMA_MCBSP3_TX,
		.rx_irq         = INT_24XX_MCBSP3_IRQ_RX,
		.tx_irq         = INT_24XX_MCBSP3_IRQ_TX,
		.ops            = &omap2_mcbsp_ops,
	},
	{
		.phys_base      = OMAP44XX_MCBSP4_BASE,
		.dma_rx_sync    = OMAP44XX_DMA_MCBSP4_RX,
		.dma_tx_sync    = OMAP44XX_DMA_MCBSP4_TX,
		.rx_irq         = INT_24XX_MCBSP4_IRQ_RX,
		.tx_irq         = INT_24XX_MCBSP4_IRQ_TX,
		.ops            = &omap2_mcbsp_ops,
	},
};
#define OMAP44XX_MCBSP_PDATA_SZ		ARRAY_SIZE(omap44xx_mcbsp_pdata)

/*
 * Detect the running SoC, size the global port-pointer array accordingly,
 * register the matching platform data with the McBSP core and hand over
 * to the common omap_mcbsp_init().
 */
static int __init omap2_mcbsp_init(void)
{
	if (cpu_is_omap2420())
		omap_mcbsp_count = OMAP2420_MCBSP_PDATA_SZ;
	if (cpu_is_omap2430())
		omap_mcbsp_count = OMAP2430_MCBSP_PDATA_SZ;
	if (cpu_is_omap34xx())
		omap_mcbsp_count = OMAP34XX_MCBSP_PDATA_SZ;
	if (cpu_is_omap44xx())
		omap_mcbsp_count = OMAP44XX_MCBSP_PDATA_SZ;

	/* NOTE(review): on an unrecognized SoC omap_mcbsp_count keeps its
	 * prior value (presumably 0) and kzalloc(0, ...) is performed —
	 * confirm the common code tolerates a zero-port configuration. */
	mcbsp_ptr = kzalloc(omap_mcbsp_count * sizeof(struct omap_mcbsp *),
								GFP_KERNEL);
	if (!mcbsp_ptr)
		return -ENOMEM;

	if (cpu_is_omap2420())
		omap_mcbsp_register_board_cfg(omap2420_mcbsp_pdata,
						OMAP2420_MCBSP_PDATA_SZ);
	if (cpu_is_omap2430())
		omap_mcbsp_register_board_cfg(omap2430_mcbsp_pdata,
						OMAP2430_MCBSP_PDATA_SZ);
	if (cpu_is_omap34xx())
		omap_mcbsp_register_board_cfg(omap34xx_mcbsp_pdata,
						OMAP34XX_MCBSP_PDATA_SZ);
	if (cpu_is_omap44xx())
		omap_mcbsp_register_board_cfg(omap44xx_mcbsp_pdata,
						OMAP44XX_MCBSP_PDATA_SZ);

	return omap_mcbsp_init();
}
arch_initcall(omap2_mcbsp_init);
gpl-2.0
pastcompute/openwrt
package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c
453
13082
/****************************************************************************** ** ** FILE NAME : ifxmips_arc4.c ** PROJECT : IFX UEIP ** MODULES : DEU Module ** ** DATE : September 8, 2009 ** AUTHOR : Mohammad Firdaus ** DESCRIPTION : Data Encryption Unit Driver for ARC4 Algorithm ** COPYRIGHT : Copyright (c) 2009 ** Infineon Technologies AG ** Am Campeon 1-12, 85579 Neubiberg, Germany ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** HISTORY ** $Date $Author $Comment ** 08 Sept 2009 Mohammad Firdaus Initial UEIP release *******************************************************************************/ /*! \defgroup IFX_DEU IFX_DEU_DRIVERS \ingroup API \brief ifx deu driver module */ /*! \file ifxmips_arc4.c \ingroup IFX_DEU \brief ARC4 encryption DEU driver file */ /*! \defgroup IFX_ARC4_FUNCTIONS IFX_ARC4_FUNCTIONS \ingroup IFX_DEU \brief IFX deu driver functions */ /* Project header */ #include <linux/version.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <linux/interrupt.h> #include <asm/byteorder.h> #include <linux/delay.h> /* Board specific header files */ #ifdef CONFIG_AR9 #include "ifxmips_deu_ar9.h" #endif #ifdef CONFIG_VR9 #include "ifxmips_deu_vr9.h" #endif static spinlock_t lock; #define CRTCL_SECT_INIT spin_lock_init(&lock) #define CRTCL_SECT_START spin_lock_irqsave(&lock, flag) #define CRTCL_SECT_END spin_unlock_irqrestore(&lock, flag) /* Preprocessor declerations */ #define ARC4_MIN_KEY_SIZE 1 //#define ARC4_MAX_KEY_SIZE 256 #define ARC4_MAX_KEY_SIZE 16 #define ARC4_BLOCK_SIZE 1 #define ARC4_START IFX_ARC4_CON #ifdef CRYPTO_DEBUG extern char debug_level; #define DPRINTF(level, format, args...) 
if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args); #else #define DPRINTF(level, format, args...) #endif /* * \brief arc4 private structure */ struct arc4_ctx { int key_length; u8 buf[120]; }; extern int disable_deudma; extern int disable_multiblock; /*! \fn static void _deu_arc4 (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) \ingroup IFX_ARC4_FUNCTIONS \brief main interface to ARC4 hardware \param ctx_arg crypto algo context \param out_arg output bytestream \param in_arg input bytestream \param iv_arg initialization vector \param nbytes length of bytestream \param encdec 1 for encrypt; 0 for decrypt \param mode operation mode such as ebc, cbc, ctr */ static void _deu_arc4 (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode) { volatile struct arc4_t *arc4 = (struct arc4_t *) ARC4_START; int i = 0; unsigned long flag; #if 1 // need to handle nbytes not multiple of 16 volatile u32 tmp_array32[4]; volatile u8 *tmp_ptr8; int remaining_bytes, j; #endif CRTCL_SECT_START; arc4->IDLEN = nbytes; #if 1 while (i < nbytes) { arc4->ID3R = *((u32 *) in_arg + (i>>2) + 0); arc4->ID2R = *((u32 *) in_arg + (i>>2) + 1); arc4->ID1R = *((u32 *) in_arg + (i>>2) + 2); arc4->ID0R = *((u32 *) in_arg + (i>>2) + 3); arc4->controlr.GO = 1; while (arc4->controlr.BUS) { // this will not take long } #if 1 // need to handle nbytes not multiple of 16 tmp_array32[0] = arc4->OD3R; tmp_array32[1] = arc4->OD2R; tmp_array32[2] = arc4->OD1R; tmp_array32[3] = arc4->OD0R; remaining_bytes = nbytes - i; if (remaining_bytes > 16) remaining_bytes = 16; tmp_ptr8 = (u8 *)&tmp_array32[0]; for (j = 0; j < remaining_bytes; j++) *out_arg++ = *tmp_ptr8++; #else *((u32 *) out_arg + (i>>2) + 0) = arc4->OD3R; *((u32 *) out_arg + (i>>2) + 1) = arc4->OD2R; *((u32 *) out_arg + (i>>2) + 2) = arc4->OD1R; *((u32 *) out_arg + (i>>2) + 3) = arc4->OD0R; #endif i += 16; } #else // dma #endif 
// dma CRTCL_SECT_END; } /*! \fn arc4_chip_init (void) \ingroup IFX_ARC4_FUNCTIONS \brief initialize arc4 hardware */ static void arc4_chip_init (void) { //do nothing } /*! \fn static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) \ingroup IFX_ARC4_FUNCTIONS \brief sets ARC4 key \param tfm linux crypto algo transform \param in_key input key \param key_len key lengths less than or equal to 16 bytes supported */ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *inkey, unsigned int key_len) { //struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); volatile struct arc4_t *arc4 = (struct arc4_t *) ARC4_START; u32 *in_key = (u32 *)inkey; // must program all bits at one go?!!! //#if 1 *IFX_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) ); //NDC=1,ENDI=1,GO=0,KSAE=1,SM=0 arc4->K3R = *((u32 *) in_key + 0); arc4->K2R = *((u32 *) in_key + 1); arc4->K1R = *((u32 *) in_key + 2); arc4->K0R = *((u32 *) in_key + 3); #if 0 // arc4 is a ugly state machine, KSAE can only be set once per session ctx->key_length = key_len; memcpy ((u8 *) (ctx->buf), in_key, key_len); #endif return 0; } /*! \fn static void _deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) \ingroup IFX_ARC4_FUNCTIONS \brief sets ARC4 hardware to ECB mode \param ctx crypto algo context \param dst output bytestream \param src input bytestream \param iv initialization vector \param nbytes length of bytestream \param encdec 1 for encrypt; 0 for decrypt \param inplace not used */ static void _deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace) { _deu_arc4 (ctx, dst, src, NULL, nbytes, encdec, 0); } /*! 
\fn static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) \ingroup IFX_ARC4_FUNCTIONS \brief encrypt/decrypt ARC4_BLOCK_SIZE of data \param tfm linux crypto algo transform \param out output bytestream \param in input bytestream */ static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); _deu_arc4 (ctx, out, in, NULL, ARC4_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0); } /* * \brief ARC4 function mappings */ static struct crypto_alg ifxdeu_arc4_alg = { .cra_name = "arc4", .cra_driver_name = "ifxdeu-arc4", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ARC4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct arc4_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ifxdeu_arc4_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = ARC4_MIN_KEY_SIZE, .cia_max_keysize = ARC4_MAX_KEY_SIZE, .cia_setkey = arc4_set_key, .cia_encrypt = arc4_crypt, .cia_decrypt = arc4_crypt, } } }; /*! \fn static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) \ingroup IFX_ARC4_FUNCTIONS \brief ECB ARC4 encrypt using linux crypto blkcipher \param desc blkcipher descriptor \param dst output scatterlist \param src input scatterlist \param nbytes data size in bytes */ static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; int err; DPRINTF(1, "\n"); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { _deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0); nbytes &= ARC4_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } /*! 
\fn static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) \ingroup IFX_ARC4_FUNCTIONS \brief ECB ARC4 decrypt using linux crypto blkcipher \param desc blkcipher descriptor \param dst output scatterlist \param src input scatterlist \param nbytes data size in bytes */ static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; int err; DPRINTF(1, "\n"); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { _deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr, NULL, nbytes, CRYPTO_DIR_DECRYPT, 0); nbytes &= ARC4_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } /* * \brief ARC4 function mappings */ static struct crypto_alg ifxdeu_ecb_arc4_alg = { .cra_name = "ecb(arc4)", .cra_driver_name = "ifxdeu-ecb(arc4)", .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = ARC4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct arc4_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ifxdeu_ecb_arc4_alg.cra_list), .cra_u = { .blkcipher = { .min_keysize = ARC4_MIN_KEY_SIZE, .max_keysize = ARC4_MAX_KEY_SIZE, .setkey = arc4_set_key, .encrypt = ecb_arc4_encrypt, .decrypt = ecb_arc4_decrypt, } } }; /*! \fn int __init ifxdeu_init_arc4(void) \ingroup IFX_ARC4_FUNCTIONS \brief initialize arc4 driver */ int __init ifxdeu_init_arc4(void) { int ret = -ENOSYS; if ((ret = crypto_register_alg(&ifxdeu_arc4_alg))) goto arc4_err; if ((ret = crypto_register_alg(&ifxdeu_ecb_arc4_alg))) goto ecb_arc4_err; arc4_chip_init (); CRTCL_SECT_INIT; printk (KERN_NOTICE "IFX DEU ARC4 initialized%s%s.\n", disable_multiblock ? "" : " (multiblock)", disable_deudma ? 
"" : " (DMA)"); return ret; arc4_err: crypto_unregister_alg(&ifxdeu_arc4_alg); printk(KERN_ERR "IFX arc4 initialization failed!\n"); return ret; ecb_arc4_err: crypto_unregister_alg(&ifxdeu_ecb_arc4_alg); printk (KERN_ERR "IFX ecb_arc4 initialization failed!\n"); return ret; } /*! \fn void __exit ifxdeu_fini_arc4(void) \ingroup IFX_ARC4_FUNCTIONS \brief unregister arc4 driver */ void __exit ifxdeu_fini_arc4(void) { crypto_unregister_alg (&ifxdeu_arc4_alg); crypto_unregister_alg (&ifxdeu_ecb_arc4_alg); }
gpl-2.0
TheTypoMaster/android_kernel_motoe
drivers/usb/host/ohci-at91.c
1477
16774
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 *  Copyright (C) 2004 SAN People (Pty) Ltd.
 *  Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
 *
 * AT91 Bus Glue
 *
 * Based on fragments of 2.4 driver by Rick Bronson.
 * Based on ohci-omap.c
 *
 * This file is licenced under the GPL.
 */

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>

#include <mach/hardware.h>
#include <asm/gpio.h>

#include <mach/board.h>
#include <mach/cpu.h>

#ifndef CONFIG_ARCH_AT91
#error "CONFIG_ARCH_AT91 must be defined."
#endif

#define valid_port(index)	((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
#define at91_for_each_port(index)	\
		for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)

/* interface and function clocks; sometimes also an AHB clock */
static struct clk *iclk, *fclk, *hclk;
/* tracks whether the three clocks above are currently enabled */
static int clocked;

extern int usb_disabled(void);

/*-------------------------------------------------------------------------*/

static void at91_start_clock(void)
{
	clk_enable(hclk);
	clk_enable(iclk);
	clk_enable(fclk);
	clocked = 1;
}

/* disabled in the reverse order of at91_start_clock() */
static void at91_stop_clock(void)
{
	clk_disable(fclk);
	clk_disable(iclk);
	clk_disable(hclk);
	clocked = 0;
}

static void at91_start_hc(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct ohci_regs __iomem *regs = hcd->regs;

	dev_dbg(&pdev->dev, "start\n");

	/*
	 * Start the USB clocks.
	 */
	at91_start_clock();

	/*
	 * The USB host controller must remain in reset.
	 */
	writel(0, &regs->control);
}

static void at91_stop_hc(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct ohci_regs __iomem *regs = hcd->regs;

	dev_dbg(&pdev->dev, "stop\n");

	/*
	 * Put the USB host controller into reset.
	 */
	writel(0, &regs->control);

	/*
	 * Stop the USB clocks.
	 */
	at91_stop_clock();
}


/*-------------------------------------------------------------------------*/

static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);

/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */


/**
 * usb_hcd_at91_probe - initialize AT91-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 */
static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver,
			struct platform_device *pdev)
{
	int retval;
	struct usb_hcd *hcd = NULL;

	/* exactly one MEM resource (registers) and one IRQ are expected */
	if (pdev->num_resources != 2) {
		pr_debug("hcd probe: invalid num_resources");
		return -ENODEV;
	}

	if ((pdev->resource[0].flags != IORESOURCE_MEM)
			|| (pdev->resource[1].flags != IORESOURCE_IRQ)) {
		pr_debug("hcd probe: invalid resource type\n");
		return -ENODEV;
	}

	hcd = usb_create_hcd(driver, &pdev->dev, "at91");
	if (!hcd)
		return -ENOMEM;
	hcd->rsrc_start = pdev->resource[0].start;
	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("request_mem_region failed\n");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("ioremap failed\n");
		retval = -EIO;
		goto err2;
	}

	iclk = clk_get(&pdev->dev, "ohci_clk");
	if (IS_ERR(iclk)) {
		dev_err(&pdev->dev, "failed to get ohci_clk\n");
		retval = PTR_ERR(iclk);
		goto err3;
	}
	fclk = clk_get(&pdev->dev, "uhpck");
	if (IS_ERR(fclk)) {
		dev_err(&pdev->dev, "failed to get uhpck\n");
		retval = PTR_ERR(fclk);
		goto err4;
	}
	hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		retval = PTR_ERR(hclk);
		goto err5;
	}

	at91_start_hc(pdev);
	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
	if (retval == 0)
		return retval;

	/* Error handling */
	at91_stop_hc(pdev);

	clk_put(hclk);
 err5:		/* fallthrough unwinding: each label releases one more step */
	clk_put(fclk);
 err4:
	clk_put(iclk);
 err3:
	iounmap(hcd->regs);
 err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	return retval;
}


/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_at91_remove - shutdown processing for AT91-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_at91_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, "rmmod" or something similar.
 *
 */
static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd,
				struct platform_device *pdev)
{
	usb_remove_hcd(hcd);
	at91_stop_hc(pdev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	clk_put(hclk);
	clk_put(fclk);
	clk_put(iclk);
	fclk = iclk = hclk = NULL;

	dev_set_drvdata(&pdev->dev, NULL);
}

/*-------------------------------------------------------------------------*/

static int __devinit
ohci_at91_reset (struct usb_hcd *hcd)
{
	struct at91_usbh_data	*board = hcd->self.controller->platform_data;
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	int			ret;

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	/* board config tells how many root-hub ports are wired up */
	ohci->num_ports = board->ports;
	return 0;
}

static int __devinit
ohci_at91_start (struct usb_hcd *hcd)
{
	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
	int			ret;

	if ((ret = ohci_run(ohci)) < 0) {
		err("can't start %s", hcd->self.bus_name);
		ohci_stop(hcd);
		return ret;
	}
	return 0;
}

/* Drive the per-port VBUS GPIO; honours active-low wiring via XOR. */
static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
{
	if (!valid_port(port))
		return;

	if (!gpio_is_valid(pdata->vbus_pin[port]))
		return;

	gpio_set_value(pdata->vbus_pin[port],
		       pdata->vbus_pin_active_low[port] ^ enable);
}

/* Read back logical VBUS state for a port; -EINVAL when not wired. */
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
{
	if (!valid_port(port))
		return -EINVAL;

	if (!gpio_is_valid(pdata->vbus_pin[port]))
		return -EINVAL;

	return gpio_get_value(pdata->vbus_pin[port]) ^
		pdata->vbus_pin_active_low[port];
}

/*
 * Update the status data from the hub with the over-current indicator change.
 */
static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
	int length = ohci_hub_status_data(hcd, buf);
	int port;

	at91_for_each_port(port) {
		if (pdata->overcurrent_changed[port]) {
			if (!length)
				length = 1;
			/* bit 0 is the hub itself; port N is bit N+1 */
			buf[0] |= 1 << (port + 1);
		}
	}

	return length;
}

/*
 * Look at the control requests to the root hub and see if we need to override.
 */
static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
				 u16 wIndex, char *buf, u16 wLength)
{
	struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
	struct usb_hub_descriptor *desc;
	int ret = -EINVAL;
	u32 *data = (u32 *)buf;

	dev_dbg(hcd->self.controller,
		"ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
		hcd, typeReq, wValue, wIndex, buf, wLength);

	/* USB port numbers are 1-based; our arrays are 0-based */
	wIndex--;

	switch (typeReq) {
	case SetPortFeature:
		if (wValue == USB_PORT_FEAT_POWER) {
			dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
			if (valid_port(wIndex)) {
				ohci_at91_usb_set_power(pdata, wIndex, 1);
				ret = 0;
			}

			goto out;
		}
		break;

	case ClearPortFeature:
		switch (wValue) {
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(hcd->self.controller,
				"ClearPortFeature: C_OVER_CURRENT\n");

			if (valid_port(wIndex)) {
				pdata->overcurrent_changed[wIndex] = 0;
				pdata->overcurrent_status[wIndex] = 0;
			}

			goto out;

		case USB_PORT_FEAT_OVER_CURRENT:
			dev_dbg(hcd->self.controller,
				"ClearPortFeature: OVER_CURRENT\n");

			if (valid_port(wIndex))
				pdata->overcurrent_status[wIndex] = 0;

			goto out;

		case USB_PORT_FEAT_POWER:
			dev_dbg(hcd->self.controller,
				"ClearPortFeature: POWER\n");

			if (valid_port(wIndex)) {
				ohci_at91_usb_set_power(pdata, wIndex, 0);
				return 0;
			}
		}
		break;
	}

	/* delegate everything else to the generic OHCI root-hub code
	 * (wIndex was decremented above, so pass wIndex + 1) */
	ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
	if (ret)
		goto out;

	switch (typeReq) {
	case GetHubDescriptor:

		/* update the hub's descriptor */

		desc = (struct usb_hub_descriptor *)buf;

		dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
			desc->wHubCharacteristics);

		/* remove the old configurations for power-switching, and
		 * over-current protection, and insert our new configuration
		 */

		desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
		desc->wHubCharacteristics |= cpu_to_le16(0x0001);

		if (pdata->overcurrent_supported) {
			desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
			desc->wHubCharacteristics |=  cpu_to_le16(0x0008|0x0001);
		}

		dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
			desc->wHubCharacteristics);

		return ret;

	case GetPortStatus:

		/* check port status */

		dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);

		if (valid_port(wIndex)) {
			if (!ohci_at91_usb_get_power(pdata, wIndex))
				*data &= ~cpu_to_le32(RH_PS_PPS);

			if (pdata->overcurrent_changed[wIndex])
				*data |= cpu_to_le32(RH_PS_OCIC);

			if (pdata->overcurrent_status[wIndex])
				*data |= cpu_to_le32(RH_PS_POCI);
		}
	}

 out:
	return ret;
}

/*-------------------------------------------------------------------------*/

static const struct hc_driver ohci_at91_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "AT91 OHCI",
	.hcd_priv_size		= sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq			= ohci_irq,
	.flags			= HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.reset			= ohci_at91_reset,
	.start			= ohci_at91_start,
	.stop			= ohci_stop,
	.shutdown		= ohci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue		= ohci_urb_enqueue,
	.urb_dequeue		= ohci_urb_dequeue,
	.endpoint_disable	= ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number	= ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data	= ohci_at91_hub_status_data,
	.hub_control		= ohci_at91_hub_control,
#ifdef CONFIG_PM
	.bus_suspend		= ohci_bus_suspend,
	.bus_resume		= ohci_bus_resume,
#endif
	.start_port_reset	= ohci_start_port_reset,
};

/*-------------------------------------------------------------------------*/

static irqreturn_t
ohci_hcd_at91_overcurrent_irq(int irq, void *data) { struct platform_device *pdev = data; struct at91_usbh_data *pdata = pdev->dev.platform_data; int val, gpio, port; /* From the GPIO notifying the over-current situation, find * out the corresponding port */ at91_for_each_port(port) { if (gpio_is_valid(pdata->overcurrent_pin[port]) && gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { gpio = pdata->overcurrent_pin[port]; break; } } if (port == AT91_MAX_USBH_PORTS) { dev_err(& pdev->dev, "overcurrent interrupt from unknown GPIO\n"); return IRQ_HANDLED; } val = gpio_get_value(gpio); /* When notified of an over-current situation, disable power on the corresponding port, and mark this port in over-current. */ if (!val) { ohci_at91_usb_set_power(pdata, port, 0); pdata->overcurrent_status[port] = 1; pdata->overcurrent_changed[port] = 1; } dev_dbg(& pdev->dev, "overcurrent situation %s\n", val ? "exited" : "notified"); return IRQ_HANDLED; } #ifdef CONFIG_OF static const struct of_device_id at91_ohci_dt_ids[] = { { .compatible = "atmel,at91rm9200-ohci" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids); static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32); static int __devinit ohci_at91_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; int i, gpio; enum of_gpio_flags flags; struct at91_usbh_data *pdata; u32 ports; if (!np) return 0; /* Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &at91_ohci_dma_mask; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &ports)) pdata->ports = ports; at91_for_each_port(i) { gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags); pdata->vbus_pin[i] = gpio; if (!gpio_is_valid(gpio)) continue; pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW; } at91_for_each_port(i) pdata->overcurrent_pin[i] = of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags); pdev->dev.platform_data = pdata; return 0; } #else static int __devinit ohci_at91_of_init(struct platform_device *pdev) { return 0; } #endif /*-------------------------------------------------------------------------*/ static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev) { struct at91_usbh_data *pdata; int i; int gpio; int ret; ret = ohci_at91_of_init(pdev); if (ret) return ret; pdata = pdev->dev.platform_data; if (pdata) { at91_for_each_port(i) { /* * do not configure PIO if not in relation with * real USB port on board */ if (i >= pdata->ports) { pdata->vbus_pin[i] = -EINVAL; pdata->overcurrent_pin[i] = -EINVAL; break; } if (!gpio_is_valid(pdata->vbus_pin[i])) continue; gpio = pdata->vbus_pin[i]; ret = gpio_request(gpio, "ohci_vbus"); if (ret) { dev_err(&pdev->dev, "can't request vbus gpio %d\n", gpio); continue; } ret = gpio_direction_output(gpio, !pdata->vbus_pin_active_low[i]); if (ret) { dev_err(&pdev->dev, "can't put vbus gpio %d as output %d\n", gpio, !pdata->vbus_pin_active_low[i]); gpio_free(gpio); continue; } ohci_at91_usb_set_power(pdata, i, 1); } at91_for_each_port(i) { if (!gpio_is_valid(pdata->overcurrent_pin[i])) continue; gpio = pdata->overcurrent_pin[i]; ret = gpio_request(gpio, "ohci_overcurrent"); if (ret) { dev_err(&pdev->dev, "can't request overcurrent gpio %d\n", gpio); continue; } ret = gpio_direction_input(gpio); if (ret) { dev_err(&pdev->dev, "can't configure overcurrent 
gpio %d as input\n", gpio); gpio_free(gpio); continue; } ret = request_irq(gpio_to_irq(gpio), ohci_hcd_at91_overcurrent_irq, IRQF_SHARED, "ohci_overcurrent", pdev); if (ret) { gpio_free(gpio); dev_err(&pdev->dev, "can't get gpio IRQ for overcurrent\n"); } } } device_init_wakeup(&pdev->dev, 1); return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); } static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev) { struct at91_usbh_data *pdata = pdev->dev.platform_data; int i; if (pdata) { at91_for_each_port(i) { if (!gpio_is_valid(pdata->vbus_pin[i])) continue; ohci_at91_usb_set_power(pdata, i, 0); gpio_free(pdata->vbus_pin[i]); } at91_for_each_port(i) { if (!gpio_is_valid(pdata->overcurrent_pin[i])) continue; free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev); gpio_free(pdata->overcurrent_pin[i]); } } device_init_wakeup(&pdev->dev, 0); usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev); return 0; } #ifdef CONFIG_PM static int ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_hcd *ohci = hcd_to_ohci(hcd); if (device_may_wakeup(&pdev->dev)) enable_irq_wake(hcd->irq); /* * The integrated transceivers seem unable to notice disconnect, * reconnect, or wakeup without the 48 MHz clock active. so for * correctness, always discard connection state (using reset). * * REVISIT: some boards will be able to turn VBUS off... 
*/ if (at91_suspend_entering_slow_clock()) { ohci_usb_reset (ohci); /* flush the writes */ (void) ohci_readl (ohci, &ohci->regs->control); at91_stop_clock(); } return 0; } static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); if (device_may_wakeup(&pdev->dev)) disable_irq_wake(hcd->irq); if (!clocked) at91_start_clock(); ohci_finish_controller_resume(hcd); return 0; } #else #define ohci_hcd_at91_drv_suspend NULL #define ohci_hcd_at91_drv_resume NULL #endif MODULE_ALIAS("platform:at91_ohci"); static struct platform_driver ohci_hcd_at91_driver = { .probe = ohci_hcd_at91_drv_probe, .remove = __devexit_p(ohci_hcd_at91_drv_remove), .shutdown = usb_hcd_platform_shutdown, .suspend = ohci_hcd_at91_drv_suspend, .resume = ohci_hcd_at91_drv_resume, .driver = { .name = "at91_ohci", .owner = THIS_MODULE, .of_match_table = of_match_ptr(at91_ohci_dt_ids), }, };
gpl-2.0
EPDCenter/android_kernel_woxter_nimbus_98q
drivers/pcmcia/ds.c
1733
35230
/* * ds.c -- 16-bit PCMCIA core support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds * (C) 2003 - 2010 Dominik Brodowski */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <linux/firmware.h> #include <linux/kref.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ss.h> #include "cs_internal.h" /*====================================================================*/ /* Module parameters */ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("PCMCIA Driver Services"); MODULE_LICENSE("GPL"); /*====================================================================*/ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) { const struct pcmcia_device_id *did = p_drv->id_table; unsigned int i; u32 hash; if (!p_drv->probe || !p_drv->remove) printk(KERN_DEBUG "pcmcia: %s lacks a requisite callback " "function\n", p_drv->name); while (did && did->match_flags) { for (i = 0; i < 4; i++) { if (!did->prod_id[i]) continue; hash = crc32(0, did->prod_id[i], strlen(did->prod_id[i])); if (hash == did->prod_id_hash[i]) continue; printk(KERN_DEBUG "pcmcia: %s: invalid hash for " "product string \"%s\": is 0x%x, should " "be 0x%x\n", p_drv->name, did->prod_id[i], did->prod_id_hash[i], hash); printk(KERN_DEBUG "pcmcia: see " "Documentation/pcmcia/devicetable.txt for " "details\n"); } did++; } return; } 
/*======================================================================*/ struct pcmcia_dynid { struct list_head node; struct pcmcia_device_id id; }; /** * pcmcia_store_new_id - add a new PCMCIA device ID to this driver and re-probe devices * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Adds a new dynamic PCMCIA device ID to this driver, * and causes the driver to probe for all devices again. */ static ssize_t pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count) { struct pcmcia_dynid *dynid; struct pcmcia_driver *pdrv = to_pcmcia_drv(driver); __u16 match_flags, manf_id, card_id; __u8 func_id, function, device_no; __u32 prod_id_hash[4] = {0, 0, 0, 0}; int fields = 0; int retval = 0; fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x", &match_flags, &manf_id, &card_id, &func_id, &function, &device_no, &prod_id_hash[0], &prod_id_hash[1], &prod_id_hash[2], &prod_id_hash[3]); if (fields < 6) return -EINVAL; dynid = kzalloc(sizeof(struct pcmcia_dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; dynid->id.match_flags = match_flags; dynid->id.manf_id = manf_id; dynid->id.card_id = card_id; dynid->id.func_id = func_id; dynid->id.function = function; dynid->id.device_no = device_no; memcpy(dynid->id.prod_id_hash, prod_id_hash, sizeof(__u32) * 4); mutex_lock(&pdrv->dynids.lock); list_add_tail(&dynid->node, &pdrv->dynids.list); mutex_unlock(&pdrv->dynids.lock); if (get_driver(&pdrv->drv)) { retval = driver_attach(&pdrv->drv); put_driver(&pdrv->drv); } if (retval) return retval; return count; } static DRIVER_ATTR(new_id, S_IWUSR, NULL, pcmcia_store_new_id); static void pcmcia_free_dynids(struct pcmcia_driver *drv) { struct pcmcia_dynid *dynid, *n; mutex_lock(&drv->dynids.lock); list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { list_del(&dynid->node); kfree(dynid); } mutex_unlock(&drv->dynids.lock); } static int pcmcia_create_newid_file(struct pcmcia_driver *drv) { int error = 
0; if (drv->probe != NULL) error = driver_create_file(&drv->drv, &driver_attr_new_id); return error; } /** * pcmcia_register_driver - register a PCMCIA driver with the bus core * @driver: the &driver being registered * * Registers a PCMCIA driver with the PCMCIA bus core. */ int pcmcia_register_driver(struct pcmcia_driver *driver) { int error; if (!driver) return -EINVAL; pcmcia_check_driver(driver); /* initialize common fields */ driver->drv.bus = &pcmcia_bus_type; driver->drv.owner = driver->owner; driver->drv.name = driver->name; mutex_init(&driver->dynids.lock); INIT_LIST_HEAD(&driver->dynids.list); pr_debug("registering driver %s\n", driver->name); error = driver_register(&driver->drv); if (error < 0) return error; error = pcmcia_create_newid_file(driver); if (error) driver_unregister(&driver->drv); return error; } EXPORT_SYMBOL(pcmcia_register_driver); /** * pcmcia_unregister_driver - unregister a PCMCIA driver with the bus core * @driver: the &driver being unregistered */ void pcmcia_unregister_driver(struct pcmcia_driver *driver) { pr_debug("unregistering driver %s\n", driver->name); driver_unregister(&driver->drv); pcmcia_free_dynids(driver); } EXPORT_SYMBOL(pcmcia_unregister_driver); /* pcmcia_device handling */ static struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev) { struct device *tmp_dev; tmp_dev = get_device(&p_dev->dev); if (!tmp_dev) return NULL; return to_pcmcia_dev(tmp_dev); } static void pcmcia_put_dev(struct pcmcia_device *p_dev) { if (p_dev) put_device(&p_dev->dev); } static void pcmcia_release_function(struct kref *ref) { struct config_t *c = container_of(ref, struct config_t, ref); pr_debug("releasing config_t\n"); kfree(c); } static void pcmcia_release_dev(struct device *dev) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int i; dev_dbg(dev, "releasing device\n"); pcmcia_put_socket(p_dev->socket); for (i = 0; i < 4; i++) kfree(p_dev->prod_id[i]); kfree(p_dev->devname); kref_put(&p_dev->function_config->ref, 
pcmcia_release_function); kfree(p_dev); } static int pcmcia_device_probe(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; struct pcmcia_socket *s; cistpl_config_t cis_config; int ret = 0; dev = get_device(dev); if (!dev) return -ENODEV; p_dev = to_pcmcia_dev(dev); p_drv = to_pcmcia_drv(dev->driver); s = p_dev->socket; dev_dbg(dev, "trying to bind to %s\n", p_drv->name); if ((!p_drv->probe) || (!p_dev->function_config) || (!try_module_get(p_drv->owner))) { ret = -EINVAL; goto put_dev; } /* set up some more device information */ ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG, &cis_config); if (!ret) { p_dev->config_base = cis_config.base; p_dev->config_regs = cis_config.rmask[0]; dev_dbg(dev, "base %x, regs %x", p_dev->config_base, p_dev->config_regs); } else { dev_printk(KERN_INFO, dev, "pcmcia: could not parse base and rmask0 of CIS\n"); p_dev->config_base = 0; p_dev->config_regs = 0; } ret = p_drv->probe(p_dev); if (ret) { dev_dbg(dev, "binding to %s failed with %d\n", p_drv->name, ret); goto put_module; } dev_dbg(dev, "%s bound: Vpp %d.%d, idx %x, IRQ %d", p_drv->name, p_dev->vpp/10, p_dev->vpp%10, p_dev->config_index, p_dev->irq); dev_dbg(dev, "resources: ioport %pR %pR iomem %pR %pR %pR", p_dev->resource[0], p_dev->resource[1], p_dev->resource[2], p_dev->resource[3], p_dev->resource[4]); mutex_lock(&s->ops_mutex); if ((s->pcmcia_pfc) && (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY); mutex_unlock(&s->ops_mutex); put_module: if (ret) module_put(p_drv->owner); put_dev: if (ret) put_device(dev); return ret; } /* * Removes a PCMCIA card from the device tree and socket list. */ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *leftover) { struct pcmcia_device *p_dev; struct pcmcia_device *tmp; dev_dbg(leftover ? &leftover->dev : &s->dev, "pcmcia_card_remove(%d) %s\n", s->sock, leftover ? 
leftover->devname : ""); mutex_lock(&s->ops_mutex); if (!leftover) s->device_count = 0; else s->device_count = 1; mutex_unlock(&s->ops_mutex); /* unregister all pcmcia_devices registered with this socket, except leftover */ list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) { if (p_dev == leftover) continue; mutex_lock(&s->ops_mutex); list_del(&p_dev->socket_device_list); mutex_unlock(&s->ops_mutex); dev_dbg(&p_dev->dev, "unregistering device\n"); device_unregister(&p_dev->dev); } return; } static int pcmcia_device_remove(struct device *dev) { struct pcmcia_device *p_dev; struct pcmcia_driver *p_drv; int i; p_dev = to_pcmcia_dev(dev); p_drv = to_pcmcia_drv(dev->driver); dev_dbg(dev, "removing device\n"); /* If we're removing the primary module driving a * pseudo multi-function card, we need to unbind * all devices */ if ((p_dev->socket->pcmcia_pfc) && (p_dev->socket->device_count > 0) && (p_dev->device_no == 0)) pcmcia_card_remove(p_dev->socket, p_dev); /* detach the "instance" */ if (!p_drv) return 0; if (p_drv->remove) p_drv->remove(p_dev); /* check for proper unloading */ if (p_dev->_irq || p_dev->_io || p_dev->_locked) dev_printk(KERN_INFO, dev, "pcmcia: driver %s did not release config properly\n", p_drv->name); for (i = 0; i < MAX_WIN; i++) if (p_dev->_win & CLIENT_WIN_REQ(i)) dev_printk(KERN_INFO, dev, "pcmcia: driver %s did not release window properly\n", p_drv->name); /* references from pcmcia_probe_device */ pcmcia_put_dev(p_dev); module_put(p_drv->owner); return 0; } /* * pcmcia_device_query -- determine information about a pcmcia device */ static int pcmcia_device_query(struct pcmcia_device *p_dev) { cistpl_manfid_t manf_id; cistpl_funcid_t func_id; cistpl_vers_1_t *vers1; unsigned int i; vers1 = kmalloc(sizeof(*vers1), GFP_KERNEL); if (!vers1) return -ENOMEM; if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_MANFID, &manf_id)) { mutex_lock(&p_dev->socket->ops_mutex); p_dev->manf_id = manf_id.manf; p_dev->card_id = 
manf_id.card; p_dev->has_manf_id = 1; p_dev->has_card_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_FUNCID, &func_id)) { mutex_lock(&p_dev->socket->ops_mutex); p_dev->func_id = func_id.func; p_dev->has_func_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } else { /* rule of thumb: cards with no FUNCID, but with * common memory device geometry information, are * probably memory cards (from pcmcia-cs) */ cistpl_device_geo_t *devgeo; devgeo = kmalloc(sizeof(*devgeo), GFP_KERNEL); if (!devgeo) { kfree(vers1); return -ENOMEM; } if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_DEVICE_GEO, devgeo)) { dev_dbg(&p_dev->dev, "mem device geometry probably means " "FUNCID_MEMORY\n"); mutex_lock(&p_dev->socket->ops_mutex); p_dev->func_id = CISTPL_FUNCID_MEMORY; p_dev->has_func_id = 1; mutex_unlock(&p_dev->socket->ops_mutex); } kfree(devgeo); } if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1, vers1)) { mutex_lock(&p_dev->socket->ops_mutex); for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) { char *tmp; unsigned int length; char *new; tmp = vers1->str + vers1->ofs[i]; length = strlen(tmp) + 1; if ((length < 2) || (length > 255)) continue; new = kmalloc(sizeof(char) * length, GFP_KERNEL); if (!new) continue; new = strncpy(new, tmp, length); tmp = p_dev->prod_id[i]; p_dev->prod_id[i] = new; kfree(tmp); } mutex_unlock(&p_dev->socket->ops_mutex); } kfree(vers1); return 0; } static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) { struct pcmcia_device *p_dev, *tmp_dev; int i; s = pcmcia_get_socket(s); if (!s) return NULL; pr_debug("adding device to %d, function %d\n", s->sock, function); p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL); if (!p_dev) goto err_put; mutex_lock(&s->ops_mutex); p_dev->device_no = (s->device_count++); mutex_unlock(&s->ops_mutex); /* max of 2 PFC devices */ if ((p_dev->device_no >= 2) && (function == 0)) goto 
err_free; /* max of 4 devices overall */ if (p_dev->device_no >= 4) goto err_free; p_dev->socket = s; p_dev->func = function; p_dev->dev.bus = &pcmcia_bus_type; p_dev->dev.parent = s->dev.parent; p_dev->dev.release = pcmcia_release_dev; /* by default don't allow DMA */ p_dev->dma_mask = DMA_MASK_NONE; p_dev->dev.dma_mask = &p_dev->dma_mask; dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); if (!dev_name(&p_dev->dev)) goto err_free; p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); if (!p_dev->devname) goto err_free; dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname); mutex_lock(&s->ops_mutex); /* * p_dev->function_config must be the same for all card functions. * Note that this is serialized by ops_mutex, so that only one * such struct will be created. */ list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) if (p_dev->func == tmp_dev->func) { p_dev->function_config = tmp_dev->function_config; p_dev->irq = tmp_dev->irq; kref_get(&p_dev->function_config->ref); } /* Add to the list in pcmcia_bus_socket */ list_add(&p_dev->socket_device_list, &s->devices_list); if (pcmcia_setup_irq(p_dev)) dev_warn(&p_dev->dev, "IRQ setup failed -- device might not work\n"); if (!p_dev->function_config) { config_t *c; dev_dbg(&p_dev->dev, "creating config_t\n"); c = kzalloc(sizeof(struct config_t), GFP_KERNEL); if (!c) { mutex_unlock(&s->ops_mutex); goto err_unreg; } p_dev->function_config = c; kref_init(&c->ref); for (i = 0; i < MAX_IO_WIN; i++) { c->io[i].name = p_dev->devname; c->io[i].flags = IORESOURCE_IO; } for (i = 0; i< MAX_WIN; i++) { c->mem[i].name = p_dev->devname; c->mem[i].flags = IORESOURCE_MEM; } } for (i = 0; i < MAX_IO_WIN; i++) p_dev->resource[i] = &p_dev->function_config->io[i]; for (; i < (MAX_IO_WIN + MAX_WIN); i++) p_dev->resource[i] = &p_dev->function_config->mem[i-MAX_IO_WIN]; mutex_unlock(&s->ops_mutex); dev_printk(KERN_NOTICE, &p_dev->dev, "pcmcia: registering new device %s (IRQ: %d)\n", 
p_dev->devname, p_dev->irq); pcmcia_device_query(p_dev); if (device_register(&p_dev->dev)) goto err_unreg; return p_dev; err_unreg: mutex_lock(&s->ops_mutex); list_del(&p_dev->socket_device_list); mutex_unlock(&s->ops_mutex); err_free: mutex_lock(&s->ops_mutex); s->device_count--; mutex_unlock(&s->ops_mutex); for (i = 0; i < 4; i++) kfree(p_dev->prod_id[i]); kfree(p_dev->devname); kfree(p_dev); err_put: pcmcia_put_socket(s); return NULL; } static int pcmcia_card_add(struct pcmcia_socket *s) { cistpl_longlink_mfc_t mfc; unsigned int no_funcs, i, no_chains; int ret = -EAGAIN; mutex_lock(&s->ops_mutex); if (!(s->resource_setup_done)) { dev_dbg(&s->dev, "no resources available, delaying card_add\n"); mutex_unlock(&s->ops_mutex); return -EAGAIN; /* try again, but later... */ } if (pcmcia_validate_mem(s)) { dev_dbg(&s->dev, "validating mem resources failed, " "delaying card_add\n"); mutex_unlock(&s->ops_mutex); return -EAGAIN; /* try again, but later... */ } mutex_unlock(&s->ops_mutex); ret = pccard_validate_cis(s, &no_chains); if (ret || !no_chains) { dev_dbg(&s->dev, "invalid CIS or invalid resources\n"); return -ENODEV; } if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) no_funcs = mfc.nfn; else no_funcs = 1; s->functions = no_funcs; for (i = 0; i < no_funcs; i++) pcmcia_device_add(s, i); return ret; } static int pcmcia_requery_callback(struct device *dev, void * _data) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (!p_dev->dev.driver) { dev_dbg(dev, "update device information\n"); pcmcia_device_query(p_dev); } return 0; } static void pcmcia_requery(struct pcmcia_socket *s) { int has_pfc; if (s->functions == 0) { pcmcia_card_add(s); return; } /* some device information might have changed because of a CIS * update or because we can finally read it correctly... so * determine it again, overwriting old values if necessary. 
*/ bus_for_each_dev(&pcmcia_bus_type, NULL, NULL, pcmcia_requery_callback); /* if the CIS changed, we need to check whether the number of * functions changed. */ if (s->fake_cis) { int old_funcs, new_funcs; cistpl_longlink_mfc_t mfc; /* does this cis override add or remove functions? */ old_funcs = s->functions; if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) new_funcs = mfc.nfn; else new_funcs = 1; if (old_funcs != new_funcs) { /* we need to re-start */ pcmcia_card_remove(s, NULL); s->functions = 0; pcmcia_card_add(s); } } /* If the PCMCIA device consists of two pseudo devices, * call pcmcia_device_add() -- which will fail if both * devices are already registered. */ mutex_lock(&s->ops_mutex); has_pfc = s->pcmcia_pfc; mutex_unlock(&s->ops_mutex); if (has_pfc) pcmcia_device_add(s, 0); /* we re-scan all devices, not just the ones connected to this * socket. This does not matter, though. */ if (bus_rescan_devices(&pcmcia_bus_type)) dev_warn(&s->dev, "rescanning the bus failed\n"); } #ifdef CONFIG_PCMCIA_LOAD_CIS /** * pcmcia_load_firmware - load CIS from userspace if device-provided is broken * @dev: the pcmcia device which needs a CIS override * @filename: requested filename in /lib/firmware/ * * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if * the one provided by the card is broken. The firmware files reside in * /lib/firmware/ in userspace. 
*/ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) { struct pcmcia_socket *s = dev->socket; const struct firmware *fw; int ret = -ENOMEM; cistpl_longlink_mfc_t mfc; int old_funcs, new_funcs = 1; if (!filename) return -EINVAL; dev_dbg(&dev->dev, "trying to load CIS file %s\n", filename); if (request_firmware(&fw, filename, &dev->dev) == 0) { if (fw->size >= CISTPL_MAX_CIS_SIZE) { ret = -EINVAL; dev_printk(KERN_ERR, &dev->dev, "pcmcia: CIS override is too big\n"); goto release; } if (!pcmcia_replace_cis(s, fw->data, fw->size)) ret = 0; else { dev_printk(KERN_ERR, &dev->dev, "pcmcia: CIS override failed\n"); goto release; } /* we need to re-start if the number of functions changed */ old_funcs = s->functions; if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) new_funcs = mfc.nfn; if (old_funcs != new_funcs) ret = -EBUSY; /* update information */ pcmcia_device_query(dev); /* requery (as number of functions might have changed) */ pcmcia_parse_uevents(s, PCMCIA_UEVENT_REQUERY); } release: release_firmware(fw); return ret; } #else /* !CONFIG_PCMCIA_LOAD_CIS */ static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) { return -ENODEV; } #endif static inline int pcmcia_devmatch(struct pcmcia_device *dev, const struct pcmcia_device_id *did) { if (did->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID) { if ((!dev->has_manf_id) || (dev->manf_id != did->manf_id)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID) { if ((!dev->has_card_id) || (dev->card_id != did->card_id)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION) { if (dev->func != did->function) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1) { if (!dev->prod_id[0]) return 0; if (strcmp(did->prod_id[0], dev->prod_id[0])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2) { if (!dev->prod_id[1]) return 0; if (strcmp(did->prod_id[1], dev->prod_id[1])) return 0; } if (did->match_flags & 
PCMCIA_DEV_ID_MATCH_PROD_ID3) { if (!dev->prod_id[2]) return 0; if (strcmp(did->prod_id[2], dev->prod_id[2])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4) { if (!dev->prod_id[3]) return 0; if (strcmp(did->prod_id[3], dev->prod_id[3])) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n"); mutex_lock(&dev->socket->ops_mutex); dev->socket->pcmcia_pfc = 1; mutex_unlock(&dev->socket->ops_mutex); if (dev->device_no != did->device_no) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { int ret; if ((!dev->has_func_id) || (dev->func_id != did->func_id)) return 0; /* if this is a pseudo-multi-function device, * we need explicit matches */ if (dev->socket->pcmcia_pfc) return 0; if (dev->device_no) return 0; /* also, FUNC_ID matching needs to be activated by userspace * after it has re-checked that there is no possible module * with a prod_id/manf_id/card_id match. */ mutex_lock(&dev->socket->ops_mutex); ret = dev->allow_func_id_match; mutex_unlock(&dev->socket->ops_mutex); if (!ret) { dev_dbg(&dev->dev, "skipping FUNC_ID match until userspace ACK\n"); return 0; } } if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { dev_dbg(&dev->dev, "device needs a fake CIS\n"); if (!dev->socket->fake_cis) if (pcmcia_load_firmware(dev, did->cisfile)) return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { int i; for (i = 0; i < 4; i++) if (dev->prod_id[i]) return 0; if (dev->has_manf_id || dev->has_card_id || dev->has_func_id) return 0; } return 1; } static int pcmcia_bus_match(struct device *dev, struct device_driver *drv) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = to_pcmcia_drv(drv); const struct pcmcia_device_id *did = p_drv->id_table; struct pcmcia_dynid *dynid; /* match dynamic devices first */ mutex_lock(&p_drv->dynids.lock); list_for_each_entry(dynid, &p_drv->dynids.list, node) { dev_dbg(dev, "trying to match to %s\n", 
drv->name); if (pcmcia_devmatch(p_dev, &dynid->id)) { dev_dbg(dev, "matched to %s\n", drv->name); mutex_unlock(&p_drv->dynids.lock); return 1; } } mutex_unlock(&p_drv->dynids.lock); while (did && did->match_flags) { dev_dbg(dev, "trying to match to %s\n", drv->name); if (pcmcia_devmatch(p_dev, did)) { dev_dbg(dev, "matched to %s\n", drv->name); return 1; } did++; } return 0; } #ifdef CONFIG_HOTPLUG static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { struct pcmcia_device *p_dev; int i; u32 hash[4] = { 0, 0, 0, 0}; if (!dev) return -ENODEV; p_dev = to_pcmcia_dev(dev); /* calculate hashes */ for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); } if (add_uevent_var(env, "SOCKET_NO=%u", p_dev->socket->sock)) return -ENOMEM; if (add_uevent_var(env, "DEVICE_NO=%02X", p_dev->device_no)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X" "pa%08Xpb%08Xpc%08Xpd%08X", p_dev->has_manf_id ? p_dev->manf_id : 0, p_dev->has_card_id ? p_dev->card_id : 0, p_dev->has_func_id ? 
p_dev->func_id : 0, p_dev->func, p_dev->device_no, hash[0], hash[1], hash[2], hash[3])) return -ENOMEM; return 0; } #else static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } #endif /************************ runtime PM support ***************************/ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state); static int pcmcia_dev_resume(struct device *dev); static int runtime_suspend(struct device *dev) { int rc; device_lock(dev); rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); device_unlock(dev); return rc; } static int runtime_resume(struct device *dev) { int rc; device_lock(dev); rc = pcmcia_dev_resume(dev); device_unlock(dev); return rc; } /************************ per-device sysfs output ***************************/ #define pcmcia_device_attr(field, test, format) \ static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \ } #define pcmcia_device_stringattr(name, field) \ static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \ return p_dev->field ? 
sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \ } pcmcia_device_attr(func, socket, "0x%02x\n"); pcmcia_device_attr(func_id, has_func_id, "0x%02x\n"); pcmcia_device_attr(manf_id, has_manf_id, "0x%04x\n"); pcmcia_device_attr(card_id, has_card_id, "0x%04x\n"); pcmcia_device_stringattr(prod_id1, prod_id[0]); pcmcia_device_stringattr(prod_id2, prod_id[1]); pcmcia_device_stringattr(prod_id3, prod_id[2]); pcmcia_device_stringattr(prod_id4, prod_id[3]); static ssize_t pcmcia_show_resources(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); char *str = buf; int i; for (i = 0; i < PCMCIA_NUM_RESOURCES; i++) str += sprintf(str, "%pr\n", p_dev->resource[i]); return str - buf; } static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->suspended) return sprintf(buf, "off\n"); else return sprintf(buf, "on\n"); } static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int ret = 0; if (!count) return -EINVAL; if ((!p_dev->suspended) && !strncmp(buf, "off", 3)) ret = runtime_suspend(dev); else if (p_dev->suspended && !strncmp(buf, "on", 2)) ret = runtime_resume(dev); return ret ? ret : count; } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); int i; u32 hash[4] = { 0, 0, 0, 0}; /* calculate hashes */ for (i = 0; i < 4; i++) { if (!p_dev->prod_id[i]) continue; hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i])); } return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X" "pa%08Xpb%08Xpc%08Xpd%08X\n", p_dev->has_manf_id ? p_dev->manf_id : 0, p_dev->has_card_id ? p_dev->card_id : 0, p_dev->has_func_id ? 
p_dev->func_id : 0, p_dev->func, p_dev->device_no, hash[0], hash[1], hash[2], hash[3]); } static ssize_t pcmcia_store_allow_func_id_match(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (!count) return -EINVAL; mutex_lock(&p_dev->socket->ops_mutex); p_dev->allow_func_id_match = 1; mutex_unlock(&p_dev->socket->ops_mutex); pcmcia_parse_uevents(p_dev->socket, PCMCIA_UEVENT_REQUERY); return count; } static struct device_attribute pcmcia_dev_attrs[] = { __ATTR(function, 0444, func_show, NULL), __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state), __ATTR(resources, 0444, pcmcia_show_resources, NULL), __ATTR_RO(func_id), __ATTR_RO(manf_id), __ATTR_RO(card_id), __ATTR_RO(prod_id1), __ATTR_RO(prod_id2), __ATTR_RO(prod_id3), __ATTR_RO(prod_id4), __ATTR_RO(modalias), __ATTR(allow_func_id_match, 0200, NULL, pcmcia_store_allow_func_id_match), __ATTR_NULL, }; /* PM support, also needed for reset */ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = NULL; int ret = 0; mutex_lock(&p_dev->socket->ops_mutex); if (p_dev->suspended) { mutex_unlock(&p_dev->socket->ops_mutex); return 0; } p_dev->suspended = 1; mutex_unlock(&p_dev->socket->ops_mutex); dev_dbg(dev, "suspending\n"); if (dev->driver) p_drv = to_pcmcia_drv(dev->driver); if (!p_drv) goto out; if (p_drv->suspend) { ret = p_drv->suspend(p_dev); if (ret) { dev_printk(KERN_ERR, dev, "pcmcia: device %s (driver %s) did " "not want to go to sleep (%d)\n", p_dev->devname, p_drv->name, ret); mutex_lock(&p_dev->socket->ops_mutex); p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); goto out; } } if (p_dev->device_no == p_dev->func) { dev_dbg(dev, "releasing configuration\n"); pcmcia_release_configuration(p_dev); } out: return ret; } static int pcmcia_dev_resume(struct device *dev) { struct pcmcia_device *p_dev = 
to_pcmcia_dev(dev); struct pcmcia_driver *p_drv = NULL; int ret = 0; mutex_lock(&p_dev->socket->ops_mutex); if (!p_dev->suspended) { mutex_unlock(&p_dev->socket->ops_mutex); return 0; } p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); dev_dbg(dev, "resuming\n"); if (dev->driver) p_drv = to_pcmcia_drv(dev->driver); if (!p_drv) goto out; if (p_dev->device_no == p_dev->func) { dev_dbg(dev, "requesting configuration\n"); ret = pcmcia_enable_device(p_dev); if (ret) goto out; } if (p_drv->resume) ret = p_drv->resume(p_dev); out: return ret; } static int pcmcia_bus_suspend_callback(struct device *dev, void * _data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->socket != skt || p_dev->suspended) return 0; return runtime_suspend(dev); } static int pcmcia_bus_resume_callback(struct device *dev, void * _data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (p_dev->socket != skt || !p_dev->suspended) return 0; runtime_resume(dev); return 0; } static int pcmcia_bus_resume(struct pcmcia_socket *skt) { dev_dbg(&skt->dev, "resuming socket %d\n", skt->sock); bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); return 0; } static int pcmcia_bus_suspend(struct pcmcia_socket *skt) { dev_dbg(&skt->dev, "suspending socket %d\n", skt->sock); if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_suspend_callback)) { pcmcia_bus_resume(skt); return -EIO; } return 0; } static int pcmcia_bus_remove(struct pcmcia_socket *skt) { atomic_set(&skt->present, 0); pcmcia_card_remove(skt, NULL); mutex_lock(&skt->ops_mutex); destroy_cis_cache(skt); pcmcia_cleanup_irq(skt); mutex_unlock(&skt->ops_mutex); return 0; } static int pcmcia_bus_add(struct pcmcia_socket *skt) { atomic_set(&skt->present, 1); mutex_lock(&skt->ops_mutex); skt->pcmcia_pfc = 0; destroy_cis_cache(skt); /* to be on the safe side... 
*/ mutex_unlock(&skt->ops_mutex); pcmcia_card_add(skt); return 0; } static int pcmcia_bus_early_resume(struct pcmcia_socket *skt) { if (!verify_cis_cache(skt)) return 0; dev_dbg(&skt->dev, "cis mismatch - different card\n"); /* first, remove the card */ pcmcia_bus_remove(skt); mutex_lock(&skt->ops_mutex); destroy_cis_cache(skt); kfree(skt->fake_cis); skt->fake_cis = NULL; skt->functions = 0; mutex_unlock(&skt->ops_mutex); /* now, add the new card */ pcmcia_bus_add(skt); return 0; } /* * NOTE: This is racy. There's no guarantee the card will still be * physically present, even if the call to this function returns * non-NULL. Furthermore, the device driver most likely is unbound * almost immediately, so the timeframe where pcmcia_dev_present * returns NULL is probably really really small. */ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) { struct pcmcia_device *p_dev; struct pcmcia_device *ret = NULL; p_dev = pcmcia_get_dev(_p_dev); if (!p_dev) return NULL; if (atomic_read(&p_dev->socket->present) != 0) ret = p_dev; pcmcia_put_dev(p_dev); return ret; } EXPORT_SYMBOL(pcmcia_dev_present); static struct pcmcia_callback pcmcia_bus_callback = { .owner = THIS_MODULE, .add = pcmcia_bus_add, .remove = pcmcia_bus_remove, .requery = pcmcia_requery, .validate = pccard_validate_cis, .suspend = pcmcia_bus_suspend, .early_resume = pcmcia_bus_early_resume, .resume = pcmcia_bus_resume, }; static int __devinit pcmcia_bus_add_socket(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *socket = dev_get_drvdata(dev); int ret; socket = pcmcia_get_socket(socket); if (!socket) { dev_printk(KERN_ERR, dev, "PCMCIA obtaining reference to socket failed\n"); return -ENODEV; } ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr); if (ret) { dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } INIT_LIST_HEAD(&socket->devices_list); socket->pcmcia_pfc = 0; socket->device_count = 0; 
atomic_set(&socket->present, 0); ret = pccard_register_pcmcia(socket, &pcmcia_bus_callback); if (ret) { dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } return 0; } static void pcmcia_bus_remove_socket(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *socket = dev_get_drvdata(dev); if (!socket) return; pccard_register_pcmcia(socket, NULL); /* unregister any unbound devices */ mutex_lock(&socket->skt_mutex); pcmcia_card_remove(socket, NULL); release_cis_mem(socket); mutex_unlock(&socket->skt_mutex); sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr); pcmcia_put_socket(socket); return; } /* the pcmcia_bus_interface is used to handle pcmcia socket devices */ static struct class_interface pcmcia_bus_interface __refdata = { .class = &pcmcia_socket_class, .add_dev = &pcmcia_bus_add_socket, .remove_dev = &pcmcia_bus_remove_socket, }; struct bus_type pcmcia_bus_type = { .name = "pcmcia", .uevent = pcmcia_bus_uevent, .match = pcmcia_bus_match, .dev_attrs = pcmcia_dev_attrs, .probe = pcmcia_device_probe, .remove = pcmcia_device_remove, .suspend = pcmcia_dev_suspend, .resume = pcmcia_dev_resume, }; static int __init init_pcmcia_bus(void) { int ret; ret = bus_register(&pcmcia_bus_type); if (ret < 0) { printk(KERN_WARNING "pcmcia: bus_register error: %d\n", ret); return ret; } ret = class_interface_register(&pcmcia_bus_interface); if (ret < 0) { printk(KERN_WARNING "pcmcia: class_interface_register error: %d\n", ret); bus_unregister(&pcmcia_bus_type); return ret; } return 0; } fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that * pcmcia_socket_class is already registered */ static void __exit exit_pcmcia_bus(void) { class_interface_unregister(&pcmcia_bus_interface); bus_unregister(&pcmcia_bus_type); } module_exit(exit_pcmcia_bus); MODULE_ALIAS("ds");
gpl-2.0
koquantam/android_kernel_oc_vivalto3gvn
arch/mips/jz4740/platform.c
1989
7422
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * JZ4740 platform devices
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/resource.h>

#include <linux/dma-mapping.h>

#include <asm/mach-jz4740/platform.h>
#include <asm/mach-jz4740/base.h>
#include <asm/mach-jz4740/irq.h>

#include <linux/serial_core.h>
#include <linux/serial_8250.h>

#include "serial.h"
#include "clock.h"

/*
 * Static platform-device descriptions for the on-SoC peripherals of the
 * Ingenic JZ4740.  Note that struct resource .end addresses are INCLUSIVE,
 * which is why MEM resources below end at "base + size - 1".
 */

/* OHCI controller */
static struct resource jz4740_usb_ohci_resources[] = {
	{
		.start	= JZ4740_UHC_BASE_ADDR,
		.end	= JZ4740_UHC_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_UHC,
		.end	= JZ4740_IRQ_UHC,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device jz4740_usb_ohci_device = {
	.name		= "jz4740-ohci",
	.id		= -1,
	.dev = {
		/* share the coherent mask as the streaming DMA mask */
		.dma_mask = &jz4740_usb_ohci_device.dev.coherent_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(jz4740_usb_ohci_resources),
	.resource	= jz4740_usb_ohci_resources,
};

/* UDC (USB gadget controller) */
static struct resource jz4740_usb_gdt_resources[] = {
	{
		.start	= JZ4740_UDC_BASE_ADDR,
		.end	= JZ4740_UDC_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_UDC,
		.end	= JZ4740_IRQ_UDC,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device jz4740_udc_device = {
	.name		= "jz-udc",
	.id		= -1,
	.dev = {
		.dma_mask = &jz4740_udc_device.dev.coherent_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(jz4740_usb_gdt_resources),
	.resource	= jz4740_usb_gdt_resources,
};

/* MMC/SD controller */
static struct resource jz4740_mmc_resources[] = {
	{
		.start	= JZ4740_MSC_BASE_ADDR,
		.end	= JZ4740_MSC_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_MSC,
		.end	= JZ4740_IRQ_MSC,
		.flags	= IORESOURCE_IRQ,
	}
};

struct platform_device jz4740_mmc_device = {
	.name		= "jz4740-mmc",
	.id		= 0,
	.dev = {
		.dma_mask = &jz4740_mmc_device.dev.coherent_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(jz4740_mmc_resources),
	.resource	= jz4740_mmc_resources,
};

/* RTC controller */
static struct resource jz4740_rtc_resources[] = {
	{
		.start	= JZ4740_RTC_BASE_ADDR,
		.end	= JZ4740_RTC_BASE_ADDR + 0x38 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_RTC,
		.end	= JZ4740_IRQ_RTC,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device jz4740_rtc_device = {
	.name		= "jz4740-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_rtc_resources),
	.resource	= jz4740_rtc_resources,
};

/* I2C controller */
static struct resource jz4740_i2c_resources[] = {
	{
		.start	= JZ4740_I2C_BASE_ADDR,
		.end	= JZ4740_I2C_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_I2C,
		.end	= JZ4740_IRQ_I2C,
		.flags	= IORESOURCE_IRQ,
	}
};

struct platform_device jz4740_i2c_device = {
	.name		= "jz4740-i2c",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(jz4740_i2c_resources),
	.resource	= jz4740_i2c_resources,
};

/* NAND controller */
static struct resource jz4740_nand_resources[] = {
	{
		.name	= "mmio",
		.start	= JZ4740_EMC_BASE_ADDR,
		.end	= JZ4740_EMC_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bank1",
		.start	= 0x18000000,
		.end	= 0x180C0000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bank2",
		.start	= 0x14000000,
		.end	= 0x140C0000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bank3",
		.start	= 0x0C000000,
		.end	= 0x0C0C0000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "bank4",
		.start	= 0x08000000,
		.end	= 0x080C0000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

/*
 * NOTE(review): no .id is set here, so the device registers as
 * "jz4740-nand.0" (id defaults to 0).  Mainline uses .id = -1; left
 * unchanged because board files in this tree may match on the ".0" name —
 * confirm before changing.
 */
struct platform_device jz4740_nand_device = {
	.name = "jz4740-nand",
	.num_resources = ARRAY_SIZE(jz4740_nand_resources),
	.resource = jz4740_nand_resources,
};

/* LCD controller */
static struct resource jz4740_framebuffer_resources[] = {
	{
		.start	= JZ4740_LCD_BASE_ADDR,
		.end	= JZ4740_LCD_BASE_ADDR + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device jz4740_framebuffer_device = {
	.name		= "jz4740-fb",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_framebuffer_resources),
	.resource	= jz4740_framebuffer_resources,
	.dev = {
		.dma_mask = &jz4740_framebuffer_device.dev.coherent_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

/* I2S controller */
static struct resource jz4740_i2s_resources[] = {
	{
		.start	= JZ4740_AIC_BASE_ADDR,
		.end	= JZ4740_AIC_BASE_ADDR + 0x38 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device jz4740_i2s_device = {
	.name		= "jz4740-i2s",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_i2s_resources),
	.resource	= jz4740_i2s_resources,
};

/* PCM */
struct platform_device jz4740_pcm_device = {
	.name		= "jz4740-pcm-audio",
	.id		= -1,
};

/* Codec: a sub-range of the AIC register block (offsets 0x80..0x87) */
static struct resource jz4740_codec_resources[] = {
	{
		.start	= JZ4740_AIC_BASE_ADDR + 0x80,
		.end	= JZ4740_AIC_BASE_ADDR + 0x88 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

struct platform_device jz4740_codec_device = {
	.name		= "jz4740-codec",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_codec_resources),
	.resource	= jz4740_codec_resources,
};

/* ADC controller */
static struct resource jz4740_adc_resources[] = {
	{
		.start	= JZ4740_SADC_BASE_ADDR,
		/*
		 * Resource ends are inclusive; every other MEM resource in
		 * this file uses "+ size - 1".  The original "+ 0x30"
		 * (without "- 1") claimed one byte too many for the 0x30-byte
		 * SADC register window.
		 */
		.end	= JZ4740_SADC_BASE_ADDR + 0x30 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= JZ4740_IRQ_SADC,
		.end	= JZ4740_IRQ_SADC,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= JZ4740_IRQ_ADC_BASE,
		.end	= JZ4740_IRQ_ADC_BASE,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device jz4740_adc_device = {
	.name		= "jz4740-adc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(jz4740_adc_resources),
	.resource	= jz4740_adc_resources,
};

/* Serial */
#define JZ4740_UART_DATA(_id) \
	{ \
		.flags = UPF_SKIP_TEST | UPF_IOREMAP | UPF_FIXED_TYPE, \
		.iotype = UPIO_MEM, \
		.regshift = 2, \
		.serial_out = jz4740_serial_out, \
		.type = PORT_16550, \
		.mapbase = JZ4740_UART ## _id ## _BASE_ADDR, \
		.irq = JZ4740_IRQ_UART ## _id, \
	}

/* Zero-terminated: the empty trailing entry ends the 8250 port list. */
static struct plat_serial8250_port jz4740_uart_data[] = {
	JZ4740_UART_DATA(0),
	JZ4740_UART_DATA(1),
	{},
};

static struct platform_device jz4740_uart_device = {
	.name = "serial8250",
	.id = 0,
	.dev = {
		.platform_data = jz4740_uart_data,
	},
};

/*
 * Register the 8250 UARTs.  Called from board code after the clock
 * bootdata is known, since the UART clock is the external oscillator rate.
 */
void jz4740_serial_device_register(void)
{
	struct plat_serial8250_port *p;

	/* patch in the runtime-determined uartclk before registering */
	for (p = jz4740_uart_data; p->flags != 0; ++p)
		p->uartclk = jz4740_clock_bdata.ext_rate;

	platform_device_register(&jz4740_uart_device);
}

/* Watchdog */
static struct resource jz4740_wdt_resources[] = {
	{
		.start = JZ4740_WDT_BASE_ADDR,
		.end   = JZ4740_WDT_BASE_ADDR + 0x10 - 1,
		.flags = IORESOURCE_MEM,
	},
};

struct platform_device jz4740_wdt_device = {
	.name	       = "jz4740-wdt",
	.id	       = -1,
	.num_resources = ARRAY_SIZE(jz4740_wdt_resources),
	.resource      = jz4740_wdt_resources,
};

/* PWM */
struct platform_device jz4740_pwm_device = {
	.name = "jz4740-pwm",
	.id   = -1,
};
gpl-2.0
LorDClockaN/Ace_CM7_kernel
arch/arm/mach-ns9xxx/gpio-ns9360.c
1989
2841
/*
 * arch/arm/mach-ns9xxx/gpio-ns9360.c
 *
 * Copyright (C) 2006,2007 by Digi International Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <mach/regs-bbu.h>
#include <mach/processor-ns9360.h>

#include "gpio-ns9360.h"

/* The NS9360 has GPIO pins 0..72; anything larger is out of range. */
static inline int ns9360_valid_gpio(unsigned gpio)
{
	return gpio <= 72;
}

/*
 * Return the GCONF register holding the per-pin configuration field for
 * @gpio.  Pins 0..55 live in register bank 1 (8 pins per register); the
 * remaining pins are in bank 2.
 */
static inline void __iomem *ns9360_gpio_get_gconfaddr(unsigned gpio)
{
	if (gpio < 56)
		return BBU_GCONFb1(gpio / 8);
	else
		/*
		 * this could be optimised away on
		 * ns9750 only builds, but it isn't ...
		 */
		return BBU_GCONFb2((gpio - 56) / 8);
}

/*
 * Return the GCTRL (output value) register for @gpio: one bit per pin,
 * 32 pins per register.
 */
static inline void __iomem *ns9360_gpio_get_gctrladdr(unsigned gpio)
{
	if (gpio < 32)
		return BBU_GCTRL1;
	else if (gpio < 64)
		return BBU_GCTRL2;
	else
		/* this could be optimised away on ns9750 only builds */
		return BBU_GCTRL3;
}

/*
 * Return the GSTAT (input value) register for @gpio: one bit per pin,
 * 32 pins per register.
 */
static inline void __iomem *ns9360_gpio_get_gstataddr(unsigned gpio)
{
	if (gpio < 32)
		return BBU_GSTAT1;
	else if (gpio < 64)
		return BBU_GSTAT2;
	else
		/* this could be optimised away on ns9750 only builds */
		return BBU_GSTAT3;
}

/*
 * each gpio can serve for 4 different purposes [0..3]. These are called
 * "functions" and passed in the parameter func. Functions 0-2 are always some
 * special things, function 3 is GPIO. If func == 3 dir specifies input or
 * output, and with inv you can enable an inverter (independent of func).
 *
 * Note: no range check here; callers are expected to validate @gpio
 * (see ns9360_gpio_configure below).  Always returns 0.
 */
int __ns9360_gpio_configure(unsigned gpio, int dir, int inv, int func)
{
	void __iomem *conf = ns9360_gpio_get_gconfaddr(gpio);
	u32 confval;

	/* read-modify-write the pin's DIR/INV/FUNC fields (index = gpio & 7) */
	confval = __raw_readl(conf);
	REGSETIM_IDX(confval, BBU_GCONFx, DIR, gpio & 7, dir);
	REGSETIM_IDX(confval, BBU_GCONFx, INV, gpio & 7, inv);
	REGSETIM_IDX(confval, BBU_GCONFx, FUNC, gpio & 7, func);
	__raw_writel(confval, conf);

	return 0;
}

/*
 * Public pin-mux entry point.  Rejects invalid pins and refuses func == 3
 * (plain GPIO): direction for GPIO mode must be set through the gpiolib
 * gpio_direction_input()/gpio_direction_output() API instead.
 * Returns 0 on success, -EINVAL otherwise.
 */
int ns9360_gpio_configure(unsigned gpio, int inv, int func)
{
	if (likely(ns9360_valid_gpio(gpio))) {
		if (func == 3) {
			printk(KERN_WARNING "use gpio_direction_input "
					"or gpio_direction_output\n");
			return -EINVAL;
		} else
			return __ns9360_gpio_configure(gpio, 0, inv, func);
	} else
		return -EINVAL;
}
EXPORT_SYMBOL(ns9360_gpio_configure);

/* Read the current input level of @gpio (0 or 1).  No range check. */
int ns9360_gpio_get_value(unsigned gpio)
{
	void __iomem *stat = ns9360_gpio_get_gstataddr(gpio);
	int ret;

	ret = 1 & (__raw_readl(stat) >> (gpio & 31));

	return ret;
}

/*
 * Drive @gpio to @value via a read-modify-write of the shared GCTRL
 * register.  NOTE(review): the RMW is not protected by a lock here, so
 * concurrent callers on pins sharing a register could race — presumably
 * serialized by the caller; confirm.
 */
void ns9360_gpio_set_value(unsigned gpio, int value)
{
	void __iomem *ctrl = ns9360_gpio_get_gctrladdr(gpio);
	u32 ctrlval;

	ctrlval = __raw_readl(ctrl);

	if (value)
		ctrlval |= 1 << (gpio & 31);
	else
		ctrlval &= ~(1 << (gpio & 31));

	__raw_writel(ctrlval, ctrl);
}
gpl-2.0
Lprigara/KernelLinuxRaspberry
drivers/media/usb/cx231xx/cx231xx-i2c.c
2501
12937
/* cx231xx-i2c.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver Based on Cx23885 driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/usb.h> #include <linux/i2c.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "cx231xx.h" /* ----------------------------------------------------------- */ static unsigned int i2c_scan; module_param(i2c_scan, int, 0444); MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); #define dprintk1(lvl, fmt, args...) \ do { \ if (i2c_debug >= lvl) { \ printk(fmt, ##args); \ } \ } while (0) #define dprintk2(lvl, fmt, args...) 
\ do { \ if (i2c_debug >= lvl) { \ printk(KERN_DEBUG "%s at %s: " fmt, \ dev->name, __func__ , ##args); \ } \ } while (0) static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus, const struct i2c_msg *msg, int tuner_type) { if (bus->nr != dev->board.tuner_i2c_master) return false; if (msg->addr != dev->board.tuner_addr) return false; if (dev->tuner_type != tuner_type) return false; return true; } /* * cx231xx_i2c_send_bytes() */ static int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 size = 0; u8 loop = 0; u8 saddr_len = 1; u8 *buf_ptr = NULL; u16 saddr = 0; u8 need_gpio = 0; if (is_tuner(dev, bus, msg, TUNER_XC5000)) { size = msg->len; if (size == 2) { /* register write sub addr */ /* Just writing sub address will cause problem * to XC5000. So ignore the request */ return 0; } else if (size == 4) { /* register write with sub addr */ if (msg->len >= 2) saddr = msg->buf[0] << 8 | msg->buf[1]; else if (msg->len == 1) saddr = msg->buf[0]; switch (saddr) { case 0x0000: /* start tuner calibration mode */ need_gpio = 1; /* FW Loading is done */ dev->xc_fw_load_done = 1; break; case 0x000D: /* Set signal source */ case 0x0001: /* Set TV standard - Video */ case 0x0002: /* Set TV standard - Audio */ case 0x0003: /* Set RF Frequency */ need_gpio = 1; break; default: if (dev->xc_fw_load_done) need_gpio = 1; break; } if (need_gpio) { dprintk1(1, "GPIO WRITE: addr 0x%x, len %d, saddr 0x%x\n", msg->addr, msg->len, saddr); return dev->cx231xx_gpio_i2c_write(dev, msg->addr, msg->buf, msg->len); } } /* special case for Xc5000 tuner case */ saddr_len = 1; /* adjust the length to correct length */ size -= saddr_len; buf_ptr = (u8 *) (msg->buf + 1); do { /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = saddr_len; 
req_data.saddr_dat = msg->buf[0]; req_data.buf_size = size > 16 ? 16 : size; req_data.p_buffer = (u8 *) (buf_ptr + loop * 16); bus->i2c_nostop = (size > 16) ? 1 : 0; bus->i2c_reserve = (loop == 0) ? 0 : 1; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); loop++; if (size >= 16) size -= 16; else size = 0; } while (size > 0); bus->i2c_nostop = 0; bus->i2c_reserve = 0; } else { /* regular case */ /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } return status < 0 ? status : 0; } /* * cx231xx_i2c_recv_bytes() * read a byte from the i2c device */ static int cx231xx_i2c_recv_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 saddr = 0; u8 need_gpio = 0; if (is_tuner(dev, bus, msg, TUNER_XC5000)) { if (msg->len == 2) saddr = msg->buf[0] << 8 | msg->buf[1]; else if (msg->len == 1) saddr = msg->buf[0]; if (dev->xc_fw_load_done) { switch (saddr) { case 0x0009: /* BUSY check */ dprintk1(1, "GPIO R E A D: Special case BUSY check \n"); /*Try read BUSY register, just set it to zero*/ msg->buf[0] = 0; if (msg->len == 2) msg->buf[1] = 0; return 0; case 0x0004: /* read Lock status */ need_gpio = 1; break; } if (need_gpio) { /* this is a special case to handle Xceive tuner clock stretch issue with gpio based I2C */ dprintk1(1, "GPIO R E A D: addr 0x%x, len %d, saddr 0x%x\n", msg->addr, msg->len, msg->buf[0] << 8 | msg->buf[1]); status = dev->cx231xx_gpio_i2c_write(dev, msg->addr, msg->buf, msg->len); status = dev->cx231xx_gpio_i2c_read(dev, msg->addr, msg->buf, msg->len); return status; } } /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = 
msg->flags; req_data.saddr_len = msg->len; req_data.saddr_dat = msg->buf[0] << 8 | msg->buf[1]; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } else { /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } return status < 0 ? status : 0; } /* * cx231xx_i2c_recv_bytes_with_saddr() * read a byte from the i2c device */ static int cx231xx_i2c_recv_bytes_with_saddr(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg1, const struct i2c_msg *msg2) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 saddr = 0; u8 need_gpio = 0; if (msg1->len == 2) saddr = msg1->buf[0] << 8 | msg1->buf[1]; else if (msg1->len == 1) saddr = msg1->buf[0]; if (is_tuner(dev, bus, msg2, TUNER_XC5000)) { if ((msg2->len < 16)) { dprintk1(1, "i2c_read: addr 0x%x, len %d, saddr 0x%x, len %d\n", msg2->addr, msg2->len, saddr, msg1->len); switch (saddr) { case 0x0008: /* read FW load status */ need_gpio = 1; break; case 0x0004: /* read Lock status */ need_gpio = 1; break; } if (need_gpio) { status = dev->cx231xx_gpio_i2c_write(dev, msg1->addr, msg1->buf, msg1->len); status = dev->cx231xx_gpio_i2c_read(dev, msg2->addr, msg2->buf, msg2->len); return status; } } } /* prepare xfer_data struct */ req_data.dev_addr = msg2->addr; req_data.direction = msg2->flags; req_data.saddr_len = msg1->len; req_data.saddr_dat = saddr; req_data.buf_size = msg2->len; req_data.p_buffer = msg2->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); return status < 0 ? 
status : 0; } /* * cx231xx_i2c_check_for_device() * check if there is a i2c_device at the supplied address */ static int cx231xx_i2c_check_for_device(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = 0; req_data.p_buffer = NULL; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); return status < 0 ? status : 0; } /* * cx231xx_i2c_xfer() * the main i2c transfer function */ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; int addr, rc, i, byte; if (num <= 0) return 0; mutex_lock(&dev->i2c_lock); for (i = 0; i < num; i++) { addr = msgs[i].addr >> 1; dprintk2(2, "%s %s addr=%x len=%d:", (msgs[i].flags & I2C_M_RD) ? "read" : "write", i == num - 1 ? 
"stop" : "nonstop", addr, msgs[i].len); if (!msgs[i].len) { /* no len: check only for device presence */ rc = cx231xx_i2c_check_for_device(i2c_adap, &msgs[i]); if (rc < 0) { dprintk2(2, " no device\n"); mutex_unlock(&dev->i2c_lock); return rc; } } else if (msgs[i].flags & I2C_M_RD) { /* read bytes */ rc = cx231xx_i2c_recv_bytes(i2c_adap, &msgs[i]); if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(" %02x", msgs[i].buf[byte]); } } else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr && (msgs[i].len <= 2) && (bus->nr < 3)) { /* read bytes */ rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap, &msgs[i], &msgs[i + 1]); if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(" %02x", msgs[i].buf[byte]); } i++; } else { /* write bytes */ if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(" %02x", msgs[i].buf[byte]); } rc = cx231xx_i2c_send_bytes(i2c_adap, &msgs[i]); } if (rc < 0) goto err; if (i2c_debug >= 2) printk("\n"); } mutex_unlock(&dev->i2c_lock); return num; err: dprintk2(2, " ERROR: %i\n", rc); mutex_unlock(&dev->i2c_lock); return rc; } /* ----------------------------------------------------------- */ /* * functionality() */ static u32 functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } static struct i2c_algorithm cx231xx_algo = { .master_xfer = cx231xx_i2c_xfer, .functionality = functionality, }; static struct i2c_adapter cx231xx_adap_template = { .owner = THIS_MODULE, .name = "cx231xx", .algo = &cx231xx_algo, }; static struct i2c_client cx231xx_client_template = { .name = "cx231xx internal", }; /* ----------------------------------------------------------- */ /* * i2c_devs * incomplete list of known devices */ static char *i2c_devs[128] = { [0x60 >> 1] = "colibri", [0x88 >> 1] = "hammerhead", [0x8e >> 1] = "CIR", [0x32 >> 1] = "GeminiIII", [0x02 >> 1] = "Aquarius", [0xa0 >> 1] = "eeprom", [0xc0 >> 1] = "tuner", [0xc2 >> 1] = "tuner", 
}; /* * cx231xx_do_i2c_scan() * check i2c address range for devices */ void cx231xx_do_i2c_scan(struct cx231xx *dev, struct i2c_client *c) { unsigned char buf; int i, rc; cx231xx_info(": Checking for I2C devices ..\n"); for (i = 0; i < 128; i++) { c->addr = i; rc = i2c_master_recv(c, &buf, 0); if (rc < 0) continue; cx231xx_info("%s: i2c scan: found device @ 0x%x [%s]\n", dev->name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???"); } cx231xx_info(": Completed Checking for I2C devices.\n"); } /* * cx231xx_i2c_register() * register i2c bus */ int cx231xx_i2c_register(struct cx231xx_i2c *bus) { struct cx231xx *dev = bus->dev; BUG_ON(!dev->cx231xx_send_usb_command); bus->i2c_adap = cx231xx_adap_template; bus->i2c_client = cx231xx_client_template; bus->i2c_adap.dev.parent = &dev->udev->dev; strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name)); bus->i2c_adap.algo_data = bus; i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev); i2c_add_adapter(&bus->i2c_adap); bus->i2c_client.adapter = &bus->i2c_adap; if (0 == bus->i2c_rc) { if (i2c_scan) cx231xx_do_i2c_scan(dev, &bus->i2c_client); } else cx231xx_warn("%s: i2c bus %d register FAILED\n", dev->name, bus->nr); return bus->i2c_rc; } /* * cx231xx_i2c_unregister() * unregister i2c_bus */ int cx231xx_i2c_unregister(struct cx231xx_i2c *bus) { i2c_del_adapter(&bus->i2c_adap); return 0; }
gpl-2.0
EPDCenterSpain/kernel_Archos_80_Titan
sound/soc/samsung/smdk_wm8580pcm.c
2757
5152
/*
 * sound/soc/samsung/smdk_wm8580pcm.c
 *
 * Copyright (c) 2011 Samsung Electronics Co. Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <sound/pcm.h>

#include <asm/mach-types.h>

#include "../codecs/wm8580.h"
#include "dma.h"
#include "pcm.h"

/*
 * Board Settings:
 *  o '1' means 'ON'
 *  o '0' means 'OFF'
 *  o 'X' means 'Don't care'
 *
 * SMDK6410, SMDK6440, SMDK6450 Base B/D: CFG1-0000, CFG2-1111
 * SMDKC110, SMDKV210: CFGB11-100100, CFGB12-0000
 */

#define SMDK_WM8580_EXT_OSC 12000000
#define SMDK_WM8580_EXT_MCLK 4096000
#define SMDK_WM8580_EXT_VOICE 2048000

static unsigned long mclk_freq;
static unsigned long xtal_freq;

/*
 * If MCLK clock directly gets from XTAL, we don't have to use PLL
 * to make MCLK, but if XTAL clock source connects with other codec
 * pin (like XTI), we should have to set codec's PLL to make MCLK.
 * Because Samsung SoC does not support pcmcdclk output like I2S.
 */

/*
 * hw_params callback: configure codec and CPU DAI formats and clocking
 * for PCM (DSP-B) operation.  Only an 8 kHz sample rate is supported by
 * this board.  Returns 0 on success or a negative errno.
 */
static int smdk_wm8580_pcm_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int rfs, ret;

	switch (params_rate(params)) {
	case 8000:
		break;
	default:
		printk(KERN_ERR "%s:%d Sampling Rate %u not supported!\n",
		__func__, __LINE__, params_rate(params));
		return -EINVAL;
	}

	/* MCLK-to-bitclock ratio used for the PCM SCLK divider below */
	rfs = mclk_freq / params_rate(params) / 2;

	/* Set the codec DAI configuration: DSP-B, inverted BCLK, codec slave */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B
				| SND_SOC_DAIFMT_IB_NF
				| SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* Set the cpu DAI configuration (must match the codec's format) */
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B
				| SND_SOC_DAIFMT_IB_NF
				| SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	if (mclk_freq == xtal_freq) {
		/* MCLK comes straight from the crystal: no codec PLL needed */
		ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_MCLK,
						mclk_freq, SND_SOC_CLOCK_IN);
		if (ret < 0)
			return ret;

		ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
						WM8580_CLKSRC_MCLK);
		if (ret < 0)
			return ret;
	} else {
		/* derive MCLK from the crystal via the codec's PLL A */
		ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
						mclk_freq, SND_SOC_CLOCK_IN);
		if (ret < 0)
			return ret;

		ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
						WM8580_CLKSRC_PLLA);
		if (ret < 0)
			return ret;

		ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
						xtal_freq, mclk_freq);
		if (ret < 0)
			return ret;
	}

	/* Set PCM source clock on CPU */
	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C_PCM_CLKSRC_MUX,
					mclk_freq, SND_SOC_CLOCK_IN);
	if (ret < 0)
		return ret;

	/* Set SCLK_DIV for making bclk */
	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_PCM_SCLK_PER_FS, rfs);
	if (ret < 0)
		return ret;

	return 0;
}

static struct snd_soc_ops smdk_wm8580_pcm_ops = {
	.hw_params = smdk_wm8580_pcm_hw_params,
};

static struct snd_soc_dai_link smdk_dai[] = {
	{
		.name = "WM8580 PAIF PCM RX",
		.stream_name = "Playback",
		.cpu_dai_name = "samsung-pcm.0",
		.codec_dai_name = "wm8580-hifi-playback",
		.platform_name = "samsung-audio",
		.codec_name = "wm8580-codec.0-001b",
		.ops = &smdk_wm8580_pcm_ops,
	}, {
		.name = "WM8580 PAIF PCM TX",
		.stream_name = "Capture",
		.cpu_dai_name = "samsung-pcm.0",
		.codec_dai_name = "wm8580-hifi-capture",
		.platform_name = "samsung-audio",
		.codec_name = "wm8580-codec.0-001b",
		.ops = &smdk_wm8580_pcm_ops,
	},
};

static struct snd_soc_card smdk_pcm = {
	.name = "SMDK-PCM",
	.dai_link = smdk_dai,
	/*
	 * Was a hard-coded 2; use ARRAY_SIZE so the count cannot drift out
	 * of sync if links are added to smdk_dai.
	 */
	.num_links = ARRAY_SIZE(smdk_dai),
};

/*
 * After SMDKC110 Base Board's Rev is '0.1', 12MHz External OSC(X1)
 * is absent (or not connected), so we connect EXT_VOICE_CLK(OSC4),
 * 2.0484Mhz, directly with MCLK both Codec and SoC.
 */
static int __devinit snd_smdk_probe(struct platform_device *pdev)
{
	int ret = 0;

	xtal_freq = SMDK_WM8580_EXT_OSC;
	mclk_freq = SMDK_WM8580_EXT_MCLK;

	/* on these boards the voice clock feeds MCLK directly (see above) */
	if (machine_is_smdkc110() || machine_is_smdkv210())
		xtal_freq = mclk_freq = SMDK_WM8580_EXT_VOICE;

	smdk_pcm.dev = &pdev->dev;
	ret = snd_soc_register_card(&smdk_pcm);
	if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int __devexit snd_smdk_remove(struct platform_device *pdev)
{
	snd_soc_unregister_card(&smdk_pcm);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver snd_smdk_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "samsung-smdk-pcm",
	},
	.probe = snd_smdk_probe,
	.remove = __devexit_p(snd_smdk_remove),
};

static int __init smdk_audio_init(void)
{
	return platform_driver_register(&snd_smdk_driver);
}

module_init(smdk_audio_init);

static void __exit smdk_audio_exit(void)
{
	platform_driver_unregister(&snd_smdk_driver);
}

module_exit(smdk_audio_exit);

MODULE_AUTHOR("Sangbeom Kim, <sbkim73@samsung.com>");
MODULE_DESCRIPTION("ALSA SoC SMDK WM8580 for PCM");
MODULE_LICENSE("GPL");
gpl-2.0
nimengyu2/dm37x-kernel-2.6.37-psp04.02.00.07
drivers/ide/ide-eh.c
3269
12344
#include <linux/kernel.h> #include <linux/ide.h> #include <linux/delay.h> static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) { ide_hwif_t *hwif = drive->hwif; if ((stat & ATA_BUSY) || ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { /* other bits are useless when BUSY */ rq->errors |= ERROR_RESET; } else if (stat & ATA_ERR) { /* err has different meaning on cdrom and tape */ if (err == ATA_ABORTED) { if ((drive->dev_flags & IDE_DFLAG_LBA) && /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */ hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS) return ide_stopped; } else if ((err & BAD_CRC) == BAD_CRC) { /* UDMA crc error, just retry the operation */ drive->crc_count++; } else if (err & (ATA_BBK | ATA_UNC)) { /* retries won't help these */ rq->errors = ERROR_MAX; } else if (err & ATA_TRK0NF) { /* help it find track zero */ rq->errors |= ERROR_RECAL; } } if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) { int nsect = drive->mult_count ? 
drive->mult_count : 1; ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE); } if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { ide_kill_rq(drive, rq); return ide_stopped; } if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) rq->errors |= ERROR_RESET; if ((rq->errors & ERROR_RESET) == ERROR_RESET) { ++rq->errors; return ide_do_reset(drive); } if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) drive->special_flags |= IDE_SFLAG_RECALIBRATE; ++rq->errors; return ide_stopped; } static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) { ide_hwif_t *hwif = drive->hwif; if ((stat & ATA_BUSY) || ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { /* other bits are useless when BUSY */ rq->errors |= ERROR_RESET; } else { /* add decoding error stuff */ } if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) /* force an abort */ hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE); if (rq->errors >= ERROR_MAX) { ide_kill_rq(drive, rq); } else { if ((rq->errors & ERROR_RESET) == ERROR_RESET) { ++rq->errors; return ide_do_reset(drive); } ++rq->errors; } return ide_stopped; } static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) { if (drive->media == ide_disk) return ide_ata_error(drive, rq, stat, err); return ide_atapi_error(drive, rq, stat, err); } /** * ide_error - handle an error on the IDE * @drive: drive the error occurred on * @msg: message to report * @stat: status bits * * ide_error() takes action based on the error returned by the drive. * For normal I/O that may well include retries. We deal with * both new-style (taskfile) and old style command handling here. 
* In the case of taskfile command handling there is work left to * do */ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) { struct request *rq; u8 err; err = ide_dump_status(drive, msg, stat); rq = drive->hwif->rq; if (rq == NULL) return ide_stopped; /* retry only "normal" I/O: */ if (rq->cmd_type != REQ_TYPE_FS) { if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { struct ide_cmd *cmd = rq->special; if (cmd) ide_complete_cmd(drive, cmd, stat, err); } else if (blk_pm_request(rq)) { rq->errors = 1; ide_complete_pm_rq(drive, rq); return ide_stopped; } rq->errors = err; ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); return ide_stopped; } return __ide_error(drive, rq, stat, err); } EXPORT_SYMBOL_GPL(ide_error); static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) { struct request *rq = drive->hwif->rq; if (rq && rq->cmd_type == REQ_TYPE_SPECIAL && rq->cmd[0] == REQ_DRIVE_RESET) { if (err <= 0 && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); } } /* needed below */ static ide_startstop_t do_reset1(ide_drive_t *, int); /* * atapi_reset_pollfunc() gets invoked to poll the interface for completion * every 50ms during an atapi drive reset operation. If the drive has not yet * responded, and we have not yet hit our maximum waiting time, then the timer * is restarted for another 50ms. 
*/ static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; const struct ide_tp_ops *tp_ops = hwif->tp_ops; u8 stat; tp_ops->dev_select(drive); udelay(10); stat = tp_ops->read_status(hwif); if (OK_STAT(stat, 0, ATA_BUSY)) printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name); else { if (time_before(jiffies, hwif->poll_timeout)) { ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20); /* continue polling */ return ide_started; } /* end of polling */ hwif->polling = 0; printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n", drive->name, stat); /* do it the old fashioned way */ return do_reset1(drive, 1); } /* done polling */ hwif->polling = 0; ide_complete_drive_reset(drive, 0); return ide_stopped; } static void ide_reset_report_error(ide_hwif_t *hwif, u8 err) { static const char *err_master_vals[] = { NULL, "passed", "formatter device error", "sector buffer error", "ECC circuitry error", "controlling MPU error" }; u8 err_master = err & 0x7f; printk(KERN_ERR "%s: reset: master: ", hwif->name); if (err_master && err_master < 6) printk(KERN_CONT "%s", err_master_vals[err_master]); else printk(KERN_CONT "error (0x%02x?)", err); if (err & 0x80) printk(KERN_CONT "; slave: failed"); printk(KERN_CONT "\n"); } /* * reset_pollfunc() gets invoked to poll the interface for completion every 50ms * during an ide reset operation. If the drives have not yet responded, * and we have not yet hit our maximum waiting time, then the timer is restarted * for another 50ms. 
*/ static ide_startstop_t reset_pollfunc(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; const struct ide_port_ops *port_ops = hwif->port_ops; u8 tmp; int err = 0; if (port_ops && port_ops->reset_poll) { err = port_ops->reset_poll(drive); if (err) { printk(KERN_ERR "%s: host reset_poll failure for %s.\n", hwif->name, drive->name); goto out; } } tmp = hwif->tp_ops->read_status(hwif); if (!OK_STAT(tmp, 0, ATA_BUSY)) { if (time_before(jiffies, hwif->poll_timeout)) { ide_set_handler(drive, &reset_pollfunc, HZ/20); /* continue polling */ return ide_started; } printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); drive->failures++; err = -EIO; } else { tmp = ide_read_error(drive); if (tmp == 1) { printk(KERN_INFO "%s: reset: success\n", hwif->name); drive->failures = 0; } else { ide_reset_report_error(hwif, tmp); drive->failures++; err = -EIO; } } out: hwif->polling = 0; /* done polling */ ide_complete_drive_reset(drive, err); return ide_stopped; } static void ide_disk_pre_reset(ide_drive_t *drive) { int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; drive->special_flags = legacy ? 
(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0; drive->mult_count = 0; drive->dev_flags &= ~IDE_DFLAG_PARKED; if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 && (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) drive->mult_req = 0; if (drive->mult_req != drive->mult_count) drive->special_flags |= IDE_SFLAG_SET_MULTMODE; } static void pre_reset(ide_drive_t *drive) { const struct ide_port_ops *port_ops = drive->hwif->port_ops; if (drive->media == ide_disk) ide_disk_pre_reset(drive); else drive->dev_flags |= IDE_DFLAG_POST_RESET; if (drive->dev_flags & IDE_DFLAG_USING_DMA) { if (drive->crc_count) ide_check_dma_crc(drive); else ide_dma_off(drive); } if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) { if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) { drive->dev_flags &= ~IDE_DFLAG_UNMASK; drive->io_32bit = 0; } return; } if (port_ops && port_ops->pre_reset) port_ops->pre_reset(drive); if (drive->current_speed != 0xff) drive->desired_speed = drive->current_speed; drive->current_speed = 0xff; } /* * do_reset1() attempts to recover a confused drive by resetting it. * Unfortunately, resetting a disk drive actually resets all devices on * the same interface, so it can really be thought of as resetting the * interface rather than resetting the drive. * * ATAPI devices have their own reset mechanism which allows them to be * individually reset without clobbering other devices on the same interface. * * Unfortunately, the IDE interface does not generate an interrupt to let * us know when the reset operation has finished, so we must poll for this. * Equally poor, though, is the fact that this may a very long time to complete, * (up to 30 seconds worstcase). So, instead of busy-waiting here for it, * we set a timer to poll at 50ms intervals. 
*/ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi) { ide_hwif_t *hwif = drive->hwif; struct ide_io_ports *io_ports = &hwif->io_ports; const struct ide_tp_ops *tp_ops = hwif->tp_ops; const struct ide_port_ops *port_ops; ide_drive_t *tdrive; unsigned long flags, timeout; int i; DEFINE_WAIT(wait); spin_lock_irqsave(&hwif->lock, flags); /* We must not reset with running handlers */ BUG_ON(hwif->handler != NULL); /* For an ATAPI device, first try an ATAPI SRST. */ if (drive->media != ide_disk && !do_not_try_atapi) { pre_reset(drive); tp_ops->dev_select(drive); udelay(20); tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); ndelay(400); hwif->poll_timeout = jiffies + WAIT_WORSTCASE; hwif->polling = 1; __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20); spin_unlock_irqrestore(&hwif->lock, flags); return ide_started; } /* We must not disturb devices in the IDE_DFLAG_PARKED state. */ do { unsigned long now; prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE); timeout = jiffies; ide_port_for_each_present_dev(i, tdrive, hwif) { if ((tdrive->dev_flags & IDE_DFLAG_PARKED) && time_after(tdrive->sleep, timeout)) timeout = tdrive->sleep; } now = jiffies; if (time_before_eq(timeout, now)) break; spin_unlock_irqrestore(&hwif->lock, flags); timeout = schedule_timeout_uninterruptible(timeout - now); spin_lock_irqsave(&hwif->lock, flags); } while (timeout); finish_wait(&ide_park_wq, &wait); /* * First, reset any device state data we were maintaining * for any of the drives on this interface. */ ide_port_for_each_dev(i, tdrive, hwif) pre_reset(tdrive); if (io_ports->ctl_addr == 0) { spin_unlock_irqrestore(&hwif->lock, flags); ide_complete_drive_reset(drive, -ENXIO); return ide_stopped; } /* * Note that we also set nIEN while resetting the device, * to mask unwanted interrupts from the interface during the reset. * However, due to the design of PC hardware, this will cause an * immediate interrupt due to the edge transition it produces. 
* This single interrupt gives us a "fast poll" for drives that * recover from reset very quickly, saving us the first 50ms wait time. */ /* set SRST and nIEN */ tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS); /* more than enough time */ udelay(10); /* clear SRST, leave nIEN (unless device is on the quirk list) */ tp_ops->write_devctl(hwif, ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) | ATA_DEVCTL_OBS); /* more than enough time */ udelay(10); hwif->poll_timeout = jiffies + WAIT_WORSTCASE; hwif->polling = 1; __ide_set_handler(drive, &reset_pollfunc, HZ/20); /* * Some weird controller like resetting themselves to a strange * state when the disks are reset this way. At least, the Winbond * 553 documentation says that */ port_ops = hwif->port_ops; if (port_ops && port_ops->resetproc) port_ops->resetproc(drive); spin_unlock_irqrestore(&hwif->lock, flags); return ide_started; } /* * ide_do_reset() is the entry point to the drive/interface reset code. */ ide_startstop_t ide_do_reset(ide_drive_t *drive) { return do_reset1(drive, 0); } EXPORT_SYMBOL(ide_do_reset);
gpl-2.0
tohenk/android_kernel_samsung_smdk4x12
drivers/pci/vpd.c
4037
1166
/* * File: vpd.c * Purpose: Provide PCI VPD support * * Copyright (C) 2010 Broadcom Corporation. */ #include <linux/pci.h> int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) { int i; for (i = off; i < len; ) { u8 val = buf[i]; if (val & PCI_VPD_LRDT) { /* Don't return success of the tag isn't complete */ if (i + PCI_VPD_LRDT_TAG_SIZE > len) break; if (val == rdt) return i; i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&buf[i]); } else { u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK; if (tag == rdt) return i; if (tag == PCI_VPD_SRDT_END) break; i += PCI_VPD_SRDT_TAG_SIZE + pci_vpd_srdt_size(&buf[i]); } } return -ENOENT; } EXPORT_SYMBOL_GPL(pci_vpd_find_tag); int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw) { int i; for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) { if (buf[i + 0] == kw[0] && buf[i + 1] == kw[1]) return i; i += PCI_VPD_INFO_FLD_HDR_SIZE + pci_vpd_info_field_size(&buf[i]); } return -ENOENT; } EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
gpl-2.0
CyanCom/android_kernel_msm_caf
arch/mips/bcm63xx/boards/board_bcm963xx.c
4549
16152
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/ssb/ssb.h> #include <asm/addrspace.h> #include <bcm63xx_board.h> #include <bcm63xx_cpu.h> #include <bcm63xx_dev_uart.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #include <bcm63xx_dev_pci.h> #include <bcm63xx_dev_enet.h> #include <bcm63xx_dev_dsp.h> #include <bcm63xx_dev_pcmcia.h> #include <board_bcm963xx.h> #define PFX "board_bcm963xx: " static struct bcm963xx_nvram nvram; static unsigned int mac_addr_used; static struct board_info board; /* * known 6338 boards */ #ifdef CONFIG_BCM63XX_CPU_6338 static struct board_info __initdata board_96338gw = { .name = "96338GW", .expected_cpu_id = 0x6338, .has_uart0 = 1, .has_enet0 = 1, .enet0 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .leds = { { .name = "adsl", .gpio = 3, .active_low = 1, }, { .name = "ses", .gpio = 5, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 1, } }, }; static struct board_info __initdata board_96338w = { .name = "96338W", .expected_cpu_id = 0x6338, .has_uart0 = 1, .has_enet0 = 1, .enet0 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .leds = { { .name = "adsl", .gpio = 3, .active_low = 1, }, { .name = "ses", .gpio = 5, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 
1, }, }, }; #endif /* * known 6345 boards */ #ifdef CONFIG_BCM63XX_CPU_6345 static struct board_info __initdata board_96345gw2 = { .name = "96345GW2", .expected_cpu_id = 0x6345, .has_uart0 = 1, }; #endif /* * known 6348 boards */ #ifdef CONFIG_BCM63XX_CPU_6348 static struct board_info __initdata board_96348r = { .name = "96348R", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .leds = { { .name = "adsl-fail", .gpio = 2, .active_low = 1, }, { .name = "ppp", .gpio = 3, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw_10 = { .name = "96348GW-10", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_pccard = 1, .has_ehci0 = 1, .has_dsp = 1, .dsp = { .gpio_rst = 6, .gpio_int = 34, .cs = 2, .ext_irq = 2, }, .leds = { { .name = "adsl-fail", .gpio = 2, .active_low = 1, }, { .name = "ppp", .gpio = 3, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw_11 = { .name = "96348GW-11", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_pccard = 1, .has_ehci0 = 1, .leds = { { .name = "adsl-fail", .gpio = 2, .active_low = 1, }, { .name = "ppp", .gpio = 3, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, 
.active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw = { .name = "96348GW", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_dsp = 1, .dsp = { .gpio_rst = 6, .gpio_int = 34, .ext_irq = 2, .cs = 2, }, .leds = { { .name = "adsl-fail", .gpio = 2, .active_low = 1, }, { .name = "ppp", .gpio = 3, .active_low = 1, }, { .name = "ppp-fail", .gpio = 4, .active_low = 1, }, { .name = "power", .gpio = 0, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 1, .active_low = 1, }, }, }; static struct board_info __initdata board_FAST2404 = { .name = "F@ST2404", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_pccard = 1, .has_ehci0 = 1, }; static struct board_info __initdata board_rta1025w_16 = { .name = "RTA1025W_16", .expected_cpu_id = 0x6348, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, }; static struct board_info __initdata board_DV201AMR = { .name = "DV201AMR", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_pci = 1, .has_ohci0 = 1, .has_enet0 = 1, .has_enet1 = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, }; static struct board_info __initdata board_96348gw_a = { .name = "96348GW-A", .expected_cpu_id = 0x6348, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, }; #endif /* * known 6358 boards */ 
#ifdef CONFIG_BCM63XX_CPU_6358 static struct board_info __initdata board_96358vw = { .name = "96358VW", .expected_cpu_id = 0x6358, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_pccard = 1, .has_ehci0 = 1, .leds = { { .name = "adsl-fail", .gpio = 15, .active_low = 1, }, { .name = "ppp", .gpio = 22, .active_low = 1, }, { .name = "ppp-fail", .gpio = 23, .active_low = 1, }, { .name = "power", .gpio = 4, .default_trigger = "default-on", }, { .name = "stop", .gpio = 5, }, }, }; static struct board_info __initdata board_96358vw2 = { .name = "96358VW2", .expected_cpu_id = 0x6358, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_pccard = 1, .has_ehci0 = 1, .leds = { { .name = "adsl", .gpio = 22, .active_low = 1, }, { .name = "ppp-fail", .gpio = 23, }, { .name = "power", .gpio = 5, .active_low = 1, .default_trigger = "default-on", }, { .name = "stop", .gpio = 4, .active_low = 1, }, }, }; static struct board_info __initdata board_AGPFS0 = { .name = "AGPF-S0", .expected_cpu_id = 0x6358, .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, .has_ehci0 = 1, }; static struct board_info __initdata board_DWVS0 = { .name = "DWV-S0", .expected_cpu_id = 0x6358, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, .enet0 = { .has_phy = 1, .use_internal_phy = 1, }, .enet1 = { .force_speed_100 = 1, .force_duplex_full = 1, }, .has_ohci0 = 1, }; #endif /* * all boards */ static const struct board_info __initdata *bcm963xx_boards[] = { #ifdef CONFIG_BCM63XX_CPU_6338 &board_96338gw, &board_96338w, #endif #ifdef CONFIG_BCM63XX_CPU_6345 &board_96345gw2, #endif #ifdef 
CONFIG_BCM63XX_CPU_6348 &board_96348r, &board_96348gw, &board_96348gw_10, &board_96348gw_11, &board_FAST2404, &board_DV201AMR, &board_96348gw_a, &board_rta1025w_16, #endif #ifdef CONFIG_BCM63XX_CPU_6358 &board_96358vw, &board_96358vw2, &board_AGPFS0, &board_DWVS0, #endif }; /* * Register a sane SPROMv2 to make the on-board * bcm4318 WLAN work */ #ifdef CONFIG_SSB_PCIHOST static struct ssb_sprom bcm63xx_sprom = { .revision = 0x02, .board_rev = 0x17, .country_code = 0x0, .ant_available_bg = 0x3, .pa0b0 = 0x15ae, .pa0b1 = 0xfa85, .pa0b2 = 0xfe8d, .pa1b0 = 0xffff, .pa1b1 = 0xffff, .pa1b2 = 0xffff, .gpio0 = 0xff, .gpio1 = 0xff, .gpio2 = 0xff, .gpio3 = 0xff, .maxpwr_bg = 0x004c, .itssi_bg = 0x00, .boardflags_lo = 0x2848, .boardflags_hi = 0x0000, }; int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out) { if (bus->bustype == SSB_BUSTYPE_PCI) { memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom)); return 0; } else { printk(KERN_ERR PFX "unable to fill SPROM for given bustype.\n"); return -EINVAL; } } #endif /* * return board name for /proc/cpuinfo */ const char *board_get_name(void) { return board.name; } /* * register & return a new board mac address */ static int board_get_mac_address(u8 *mac) { u8 *p; int count; if (mac_addr_used >= nvram.mac_addr_count) { printk(KERN_ERR PFX "not enough mac address\n"); return -ENODEV; } memcpy(mac, nvram.mac_addr_base, ETH_ALEN); p = mac + ETH_ALEN - 1; count = mac_addr_used; while (count--) { do { (*p)++; if (*p != 0) break; p--; } while (p != mac); } if (p == mac) { printk(KERN_ERR PFX "unable to fetch mac address\n"); return -ENODEV; } mac_addr_used++; return 0; } /* * early init callback, read nvram data from flash and checksum it */ void __init board_prom_init(void) { unsigned int check_len, i; u8 *boot_addr, *cfe, *p; char cfe_version[32]; u32 val; /* read base address of boot chip select (0) */ val = bcm_mpi_readl(MPI_CSBASE_REG(0)); val &= MPI_CSBASE_BASE_MASK; boot_addr = (u8 *)KSEG1ADDR(val); /* dump cfe 
version */ cfe = boot_addr + BCM963XX_CFE_VERSION_OFFSET; if (!memcmp(cfe, "cfe-v", 5)) snprintf(cfe_version, sizeof(cfe_version), "%u.%u.%u-%u.%u", cfe[5], cfe[6], cfe[7], cfe[8], cfe[9]); else strcpy(cfe_version, "unknown"); printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); /* extract nvram data */ memcpy(&nvram, boot_addr + BCM963XX_NVRAM_OFFSET, sizeof(nvram)); /* check checksum before using data */ if (nvram.version <= 4) check_len = offsetof(struct bcm963xx_nvram, checksum_old); else check_len = sizeof(nvram); val = 0; p = (u8 *)&nvram; while (check_len--) val += *p; if (val) { printk(KERN_ERR PFX "invalid nvram checksum\n"); return; } /* find board by name */ for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) { if (strncmp(nvram.name, bcm963xx_boards[i]->name, sizeof(nvram.name))) continue; /* copy, board desc array is marked initdata */ memcpy(&board, bcm963xx_boards[i], sizeof(board)); break; } /* bail out if board is not found, will complain later */ if (!board.name[0]) { char name[17]; memcpy(name, nvram.name, 16); name[16] = 0; printk(KERN_ERR PFX "unknown bcm963xx board: %s\n", name); return; } /* setup pin multiplexing depending on board enabled device, * this has to be done this early since PCI init is done * inside arch_initcall */ val = 0; #ifdef CONFIG_PCI if (board.has_pci) { bcm63xx_pci_enabled = 1; if (BCMCPU_IS_6348()) val |= GPIO_MODE_6348_G2_PCI; } #endif if (board.has_pccard) { if (BCMCPU_IS_6348()) val |= GPIO_MODE_6348_G1_MII_PCCARD; } if (board.has_enet0 && !board.enet0.use_internal_phy) { if (BCMCPU_IS_6348()) val |= GPIO_MODE_6348_G3_EXT_MII | GPIO_MODE_6348_G0_EXT_MII; } if (board.has_enet1 && !board.enet1.use_internal_phy) { if (BCMCPU_IS_6348()) val |= GPIO_MODE_6348_G3_EXT_MII | GPIO_MODE_6348_G0_EXT_MII; } bcm_gpio_writel(val, GPIO_MODE_REG); } /* * second stage init callback, good time to panic if we couldn't * identify on which board we're running since early printk is working */ void __init board_setup(void) { if 
(!board.name[0]) panic("unable to detect bcm963xx board"); printk(KERN_INFO PFX "board name: %s\n", board.name); /* make sure we're running on expected cpu */ if (bcm63xx_get_cpu_id() != board.expected_cpu_id) panic("unexpected CPU for bcm963xx board"); } static struct mtd_partition mtd_partitions[] = { { .name = "cfe", .offset = 0x0, .size = 0x40000, } }; static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL }; static struct physmap_flash_data flash_data = { .width = 2, .nr_parts = ARRAY_SIZE(mtd_partitions), .parts = mtd_partitions, .part_probe_types = bcm63xx_part_types, }; static struct resource mtd_resources[] = { { .start = 0, /* filled at runtime */ .end = 0, /* filled at runtime */ .flags = IORESOURCE_MEM, } }; static struct platform_device mtd_dev = { .name = "physmap-flash", .resource = mtd_resources, .num_resources = ARRAY_SIZE(mtd_resources), .dev = { .platform_data = &flash_data, }, }; static struct gpio_led_platform_data bcm63xx_led_data; static struct platform_device bcm63xx_gpio_leds = { .name = "leds-gpio", .id = 0, .dev.platform_data = &bcm63xx_led_data, }; /* * third stage init callback, register all board devices. 
*/ int __init board_register_devices(void) { u32 val; if (board.has_uart0) bcm63xx_uart_register(0); if (board.has_uart1) bcm63xx_uart_register(1); if (board.has_pccard) bcm63xx_pcmcia_register(); if (board.has_enet0 && !board_get_mac_address(board.enet0.mac_addr)) bcm63xx_enet_register(0, &board.enet0); if (board.has_enet1 && !board_get_mac_address(board.enet1.mac_addr)) bcm63xx_enet_register(1, &board.enet1); if (board.has_dsp) bcm63xx_dsp_register(&board.dsp); /* Generate MAC address for WLAN and register our SPROM, * do this after registering enet devices */ #ifdef CONFIG_SSB_PCIHOST if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); if (ssb_arch_register_fallback_sprom( &bcm63xx_get_fallback_sprom) < 0) pr_err(PFX "failed to register fallback SPROM\n"); } #endif /* read base address of boot chip select (0) */ val = bcm_mpi_readl(MPI_CSBASE_REG(0)); val &= MPI_CSBASE_BASE_MASK; mtd_resources[0].start = val; mtd_resources[0].end = 0x1FFFFFFF; platform_device_register(&mtd_dev); bcm63xx_led_data.num_leds = ARRAY_SIZE(board.leds); bcm63xx_led_data.leds = board.leds; platform_device_register(&bcm63xx_gpio_leds); return 0; }
gpl-2.0
ubports/android_kernel_oneplus_one
sound/soc/mxs/mxs-saif.c
4805
19062
/* * Copyright 2011 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/fsl/mxs-dma.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/saif.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/mxs.h> #include "mxs-saif.h" static struct mxs_saif *mxs_saif[2]; /* * SAIF is a little different with other normal SOC DAIs on clock using. * * For MXS, two SAIF modules are instantiated on-chip. * Each SAIF has a set of clock pins and can be operating in master * mode simultaneously if they are connected to different off-chip codecs. * Also, one of the two SAIFs can master or drive the clock pins while the * other SAIF, in slave mode, receives clocking from the master SAIF. * This also means that both SAIFs must operate at the same sample rate. * * We abstract this as each saif has a master, the master could be * himself or other saifs. In the generic saif driver, saif does not need * to know the different clkmux. 
Saif only needs to know who is his master * and operating his master to generate the proper clock rate for him. * The master id is provided in mach-specific layer according to different * clkmux setting. */ static int mxs_saif_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); switch (clk_id) { case MXS_SAIF_MCLK: saif->mclk = freq; break; default: return -EINVAL; } return 0; } /* * Since SAIF may work on EXTMASTER mode, IOW, it's working BITCLK&LRCLK * is provided by other SAIF, we provide a interface here to get its master * from its master_id. * Note that the master could be himself. */ static inline struct mxs_saif *mxs_saif_get_master(struct mxs_saif * saif) { return mxs_saif[saif->master_id]; } /* * Set SAIF clock and MCLK */ static int mxs_saif_set_clk(struct mxs_saif *saif, unsigned int mclk, unsigned int rate) { u32 scr; int ret; struct mxs_saif *master_saif; dev_dbg(saif->dev, "mclk %d rate %d\n", mclk, rate); /* Set master saif to generate proper clock */ master_saif = mxs_saif_get_master(saif); if (!master_saif) return -EINVAL; dev_dbg(saif->dev, "master saif%d\n", master_saif->id); /* Checking if can playback and capture simutaneously */ if (master_saif->ongoing && rate != master_saif->cur_rate) { dev_err(saif->dev, "can not change clock, master saif%d(rate %d) is ongoing\n", master_saif->id, master_saif->cur_rate); return -EINVAL; } scr = __raw_readl(master_saif->base + SAIF_CTRL); scr &= ~BM_SAIF_CTRL_BITCLK_MULT_RATE; scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE; /* * Set SAIF clock * * The SAIF clock should be either 384*fs or 512*fs. * If MCLK is used, the SAIF clk ratio need to match mclk ratio. * For 32x mclk, set saif clk as 512*fs. * For 48x mclk, set saif clk as 384*fs. * * If MCLK is not used, we just set saif clk to 512*fs. 
*/ clk_prepare_enable(master_saif->clk); if (master_saif->mclk_in_use) { if (mclk % 32 == 0) { scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE; ret = clk_set_rate(master_saif->clk, 512 * rate); } else if (mclk % 48 == 0) { scr |= BM_SAIF_CTRL_BITCLK_BASE_RATE; ret = clk_set_rate(master_saif->clk, 384 * rate); } else { /* SAIF MCLK should be either 32x or 48x */ clk_disable_unprepare(master_saif->clk); return -EINVAL; } } else { ret = clk_set_rate(master_saif->clk, 512 * rate); scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE; } clk_disable_unprepare(master_saif->clk); if (ret) return ret; master_saif->cur_rate = rate; if (!master_saif->mclk_in_use) { __raw_writel(scr, master_saif->base + SAIF_CTRL); return 0; } /* * Program the over-sample rate for MCLK output * * The available MCLK range is 32x, 48x... 512x. The rate * could be from 8kHz to 192kH. */ switch (mclk / rate) { case 32: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(4); break; case 64: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3); break; case 128: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2); break; case 256: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1); break; case 512: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0); break; case 48: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3); break; case 96: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2); break; case 192: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1); break; case 384: scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0); break; default: return -EINVAL; } __raw_writel(scr, master_saif->base + SAIF_CTRL); return 0; } /* * Put and disable MCLK. 
*/ int mxs_saif_put_mclk(unsigned int saif_id) { struct mxs_saif *saif = mxs_saif[saif_id]; u32 stat; if (!saif) return -EINVAL; stat = __raw_readl(saif->base + SAIF_STAT); if (stat & BM_SAIF_STAT_BUSY) { dev_err(saif->dev, "error: busy\n"); return -EBUSY; } clk_disable_unprepare(saif->clk); /* disable MCLK output */ __raw_writel(BM_SAIF_CTRL_CLKGATE, saif->base + SAIF_CTRL + MXS_SET_ADDR); __raw_writel(BM_SAIF_CTRL_RUN, saif->base + SAIF_CTRL + MXS_CLR_ADDR); saif->mclk_in_use = 0; return 0; } /* * Get MCLK and set clock rate, then enable it * * This interface is used for codecs who are using MCLK provided * by saif. */ int mxs_saif_get_mclk(unsigned int saif_id, unsigned int mclk, unsigned int rate) { struct mxs_saif *saif = mxs_saif[saif_id]; u32 stat; int ret; struct mxs_saif *master_saif; if (!saif) return -EINVAL; /* Clear Reset */ __raw_writel(BM_SAIF_CTRL_SFTRST, saif->base + SAIF_CTRL + MXS_CLR_ADDR); /* FIXME: need clear clk gate for register r/w */ __raw_writel(BM_SAIF_CTRL_CLKGATE, saif->base + SAIF_CTRL + MXS_CLR_ADDR); master_saif = mxs_saif_get_master(saif); if (saif != master_saif) { dev_err(saif->dev, "can not get mclk from a non-master saif\n"); return -EINVAL; } stat = __raw_readl(saif->base + SAIF_STAT); if (stat & BM_SAIF_STAT_BUSY) { dev_err(saif->dev, "error: busy\n"); return -EBUSY; } saif->mclk_in_use = 1; ret = mxs_saif_set_clk(saif, mclk, rate); if (ret) return ret; ret = clk_prepare_enable(saif->clk); if (ret) return ret; /* enable MCLK output */ __raw_writel(BM_SAIF_CTRL_RUN, saif->base + SAIF_CTRL + MXS_SET_ADDR); return 0; } /* * SAIF DAI format configuration. * Should only be called when port is inactive. 
*/ static int mxs_saif_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { u32 scr, stat; u32 scr0; struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); stat = __raw_readl(saif->base + SAIF_STAT); if (stat & BM_SAIF_STAT_BUSY) { dev_err(cpu_dai->dev, "error: busy\n"); return -EBUSY; } scr0 = __raw_readl(saif->base + SAIF_CTRL); scr0 = scr0 & ~BM_SAIF_CTRL_BITCLK_EDGE & ~BM_SAIF_CTRL_LRCLK_POLARITY \ & ~BM_SAIF_CTRL_JUSTIFY & ~BM_SAIF_CTRL_DELAY; scr = 0; /* DAI mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* data frame low 1clk before data */ scr |= BM_SAIF_CTRL_DELAY; scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY; break; case SND_SOC_DAIFMT_LEFT_J: /* data frame high with data */ scr &= ~BM_SAIF_CTRL_DELAY; scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY; scr &= ~BM_SAIF_CTRL_JUSTIFY; break; default: return -EINVAL; } /* DAI clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_IF: scr |= BM_SAIF_CTRL_BITCLK_EDGE; scr |= BM_SAIF_CTRL_LRCLK_POLARITY; break; case SND_SOC_DAIFMT_IB_NF: scr |= BM_SAIF_CTRL_BITCLK_EDGE; scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY; break; case SND_SOC_DAIFMT_NB_IF: scr &= ~BM_SAIF_CTRL_BITCLK_EDGE; scr |= BM_SAIF_CTRL_LRCLK_POLARITY; break; case SND_SOC_DAIFMT_NB_NF: scr &= ~BM_SAIF_CTRL_BITCLK_EDGE; scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY; break; } /* * Note: We simply just support master mode since SAIF TX can only * work as master. * Here the master is relative to codec side. * Saif internally could be slave when working on EXTMASTER mode. * We just hide this to machine driver. 
*/ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: if (saif->id == saif->master_id) scr &= ~BM_SAIF_CTRL_SLAVE_MODE; else scr |= BM_SAIF_CTRL_SLAVE_MODE; __raw_writel(scr | scr0, saif->base + SAIF_CTRL); break; default: return -EINVAL; } return 0; } static int mxs_saif_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); snd_soc_dai_set_dma_data(cpu_dai, substream, &saif->dma_param); /* clear error status to 0 for each re-open */ saif->fifo_underrun = 0; saif->fifo_overrun = 0; /* Clear Reset for normal operations */ __raw_writel(BM_SAIF_CTRL_SFTRST, saif->base + SAIF_CTRL + MXS_CLR_ADDR); /* clear clock gate */ __raw_writel(BM_SAIF_CTRL_CLKGATE, saif->base + SAIF_CTRL + MXS_CLR_ADDR); return 0; } /* * Should only be called when port is inactive. * although can be called multiple times by upper layers. */ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); u32 scr, stat; int ret; /* mclk should already be set */ if (!saif->mclk && saif->mclk_in_use) { dev_err(cpu_dai->dev, "set mclk first\n"); return -EINVAL; } stat = __raw_readl(saif->base + SAIF_STAT); if (stat & BM_SAIF_STAT_BUSY) { dev_err(cpu_dai->dev, "error: busy\n"); return -EBUSY; } /* * Set saif clk based on sample rate. * If mclk is used, we also set mclk, if not, saif->mclk is * default 0, means not used. 
*/ ret = mxs_saif_set_clk(saif, saif->mclk, params_rate(params)); if (ret) { dev_err(cpu_dai->dev, "unable to get proper clk\n"); return ret; } scr = __raw_readl(saif->base + SAIF_CTRL); scr &= ~BM_SAIF_CTRL_WORD_LENGTH; scr &= ~BM_SAIF_CTRL_BITCLK_48XFS_ENABLE; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: scr |= BF_SAIF_CTRL_WORD_LENGTH(0); break; case SNDRV_PCM_FORMAT_S20_3LE: scr |= BF_SAIF_CTRL_WORD_LENGTH(4); scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE; break; case SNDRV_PCM_FORMAT_S24_LE: scr |= BF_SAIF_CTRL_WORD_LENGTH(8); scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE; break; default: return -EINVAL; } /* Tx/Rx config */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* enable TX mode */ scr &= ~BM_SAIF_CTRL_READ_MODE; } else { /* enable RX mode */ scr |= BM_SAIF_CTRL_READ_MODE; } __raw_writel(scr, saif->base + SAIF_CTRL); return 0; } static int mxs_saif_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); /* enable FIFO error irqs */ __raw_writel(BM_SAIF_CTRL_FIFO_ERROR_IRQ_EN, saif->base + SAIF_CTRL + MXS_SET_ADDR); return 0; } static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); struct mxs_saif *master_saif; u32 delay; master_saif = mxs_saif_get_master(saif); if (!master_saif) return -EINVAL; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: dev_dbg(cpu_dai->dev, "start\n"); clk_enable(master_saif->clk); if (!master_saif->mclk_in_use) __raw_writel(BM_SAIF_CTRL_RUN, master_saif->base + SAIF_CTRL + MXS_SET_ADDR); /* * If the saif's master is not himself, we also need to enable * itself clk for its internal basic logic to work. 
*/ if (saif != master_saif) { clk_enable(saif->clk); __raw_writel(BM_SAIF_CTRL_RUN, saif->base + SAIF_CTRL + MXS_SET_ADDR); } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* * write a data to saif data register to trigger * the transfer */ __raw_writel(0, saif->base + SAIF_DATA); } else { /* * read a data from saif data register to trigger * the receive */ __raw_readl(saif->base + SAIF_DATA); } master_saif->ongoing = 1; dev_dbg(saif->dev, "CTRL 0x%x STAT 0x%x\n", __raw_readl(saif->base + SAIF_CTRL), __raw_readl(saif->base + SAIF_STAT)); dev_dbg(master_saif->dev, "CTRL 0x%x STAT 0x%x\n", __raw_readl(master_saif->base + SAIF_CTRL), __raw_readl(master_saif->base + SAIF_STAT)); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: dev_dbg(cpu_dai->dev, "stop\n"); /* wait a while for the current sample to complete */ delay = USEC_PER_SEC / master_saif->cur_rate; if (!master_saif->mclk_in_use) { __raw_writel(BM_SAIF_CTRL_RUN, master_saif->base + SAIF_CTRL + MXS_CLR_ADDR); udelay(delay); } clk_disable(master_saif->clk); if (saif != master_saif) { __raw_writel(BM_SAIF_CTRL_RUN, saif->base + SAIF_CTRL + MXS_CLR_ADDR); udelay(delay); clk_disable(saif->clk); } master_saif->ongoing = 0; break; default: return -EINVAL; } return 0; } #define MXS_SAIF_RATES SNDRV_PCM_RATE_8000_192000 #define MXS_SAIF_FORMATS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops mxs_saif_dai_ops = { .startup = mxs_saif_startup, .trigger = mxs_saif_trigger, .prepare = mxs_saif_prepare, .hw_params = mxs_saif_hw_params, .set_sysclk = mxs_saif_set_dai_sysclk, .set_fmt = mxs_saif_set_dai_fmt, }; static int mxs_saif_dai_probe(struct snd_soc_dai *dai) { struct mxs_saif *saif = dev_get_drvdata(dai->dev); snd_soc_dai_set_drvdata(dai, saif); return 0; } static struct snd_soc_dai_driver mxs_saif_dai = { .name = "mxs-saif", .probe = mxs_saif_dai_probe, .playback = { .channels_min = 2, 
.channels_max = 2, .rates = MXS_SAIF_RATES, .formats = MXS_SAIF_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = MXS_SAIF_RATES, .formats = MXS_SAIF_FORMATS, }, .ops = &mxs_saif_dai_ops, }; static irqreturn_t mxs_saif_irq(int irq, void *dev_id) { struct mxs_saif *saif = dev_id; unsigned int stat; stat = __raw_readl(saif->base + SAIF_STAT); if (!(stat & (BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ | BM_SAIF_STAT_FIFO_OVERFLOW_IRQ))) return IRQ_NONE; if (stat & BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ) { dev_dbg(saif->dev, "underrun!!! %d\n", ++saif->fifo_underrun); __raw_writel(BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ, saif->base + SAIF_STAT + MXS_CLR_ADDR); } if (stat & BM_SAIF_STAT_FIFO_OVERFLOW_IRQ) { dev_dbg(saif->dev, "overrun!!! %d\n", ++saif->fifo_overrun); __raw_writel(BM_SAIF_STAT_FIFO_OVERFLOW_IRQ, saif->base + SAIF_STAT + MXS_CLR_ADDR); } dev_dbg(saif->dev, "SAIF_CTRL %x SAIF_STAT %x\n", __raw_readl(saif->base + SAIF_CTRL), __raw_readl(saif->base + SAIF_STAT)); return IRQ_HANDLED; } static int mxs_saif_probe(struct platform_device *pdev) { struct resource *iores, *dmares; struct mxs_saif *saif; struct mxs_saif_platform_data *pdata; int ret = 0; if (pdev->id >= ARRAY_SIZE(mxs_saif)) return -EINVAL; saif = devm_kzalloc(&pdev->dev, sizeof(*saif), GFP_KERNEL); if (!saif) return -ENOMEM; mxs_saif[pdev->id] = saif; saif->id = pdev->id; pdata = pdev->dev.platform_data; if (pdata && !pdata->master_mode) { saif->master_id = pdata->master_id; if (saif->master_id < 0 || saif->master_id >= ARRAY_SIZE(mxs_saif) || saif->master_id == saif->id) { dev_err(&pdev->dev, "get wrong master id\n"); return -EINVAL; } } else { saif->master_id = saif->id; } saif->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(saif->clk)) { ret = PTR_ERR(saif->clk); dev_err(&pdev->dev, "Cannot get the clock: %d\n", ret); return ret; } iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); saif->base = devm_request_and_ioremap(&pdev->dev, iores); if (!saif->base) { dev_err(&pdev->dev, "ioremap failed\n"); 
ret = -ENODEV; goto failed_get_resource; } dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmares) { ret = -ENODEV; dev_err(&pdev->dev, "failed to get dma resource: %d\n", ret); goto failed_get_resource; } saif->dma_param.chan_num = dmares->start; saif->irq = platform_get_irq(pdev, 0); if (saif->irq < 0) { ret = saif->irq; dev_err(&pdev->dev, "failed to get irq resource: %d\n", ret); goto failed_get_resource; } saif->dev = &pdev->dev; ret = devm_request_irq(&pdev->dev, saif->irq, mxs_saif_irq, 0, "mxs-saif", saif); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); goto failed_get_resource; } saif->dma_param.chan_irq = platform_get_irq(pdev, 1); if (saif->dma_param.chan_irq < 0) { ret = saif->dma_param.chan_irq; dev_err(&pdev->dev, "failed to get dma irq resource: %d\n", ret); goto failed_get_resource; } platform_set_drvdata(pdev, saif); ret = snd_soc_register_dai(&pdev->dev, &mxs_saif_dai); if (ret) { dev_err(&pdev->dev, "register DAI failed\n"); goto failed_get_resource; } saif->soc_platform_pdev = platform_device_alloc( "mxs-pcm-audio", pdev->id); if (!saif->soc_platform_pdev) { ret = -ENOMEM; goto failed_pdev_alloc; } platform_set_drvdata(saif->soc_platform_pdev, saif); ret = platform_device_add(saif->soc_platform_pdev); if (ret) { dev_err(&pdev->dev, "failed to add soc platform device\n"); goto failed_pdev_add; } return 0; failed_pdev_add: platform_device_put(saif->soc_platform_pdev); failed_pdev_alloc: snd_soc_unregister_dai(&pdev->dev); failed_get_resource: clk_put(saif->clk); return ret; } static int __devexit mxs_saif_remove(struct platform_device *pdev) { struct mxs_saif *saif = platform_get_drvdata(pdev); platform_device_unregister(saif->soc_platform_pdev); snd_soc_unregister_dai(&pdev->dev); clk_put(saif->clk); return 0; } static struct platform_driver mxs_saif_driver = { .probe = mxs_saif_probe, .remove = __devexit_p(mxs_saif_remove), .driver = { .name = "mxs-saif", .owner = THIS_MODULE, }, }; 
module_platform_driver(mxs_saif_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("MXS ASoC SAIF driver"); MODULE_LICENSE("GPL");
gpl-2.0
Android-L-Porting-Team/android_kernel_mako
drivers/mfd/pcf50633-core.c
4805
8041
/* NXP PCF50633 Power Management Unit (PMU) driver * * (C) 2006-2008 by Openmoko, Inc. * Author: Harald Welte <laforge@openmoko.org> * Balaji Rao <balajirrao@openmoko.org> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/err.h> #include <linux/mfd/pcf50633/core.h> /* Read a block of up to 32 regs */ int pcf50633_read_block(struct pcf50633 *pcf, u8 reg, int nr_regs, u8 *data) { int ret; ret = regmap_raw_read(pcf->regmap, reg, data, nr_regs); if (ret != 0) return ret; return nr_regs; } EXPORT_SYMBOL_GPL(pcf50633_read_block); /* Write a block of up to 32 regs */ int pcf50633_write_block(struct pcf50633 *pcf , u8 reg, int nr_regs, u8 *data) { return regmap_raw_write(pcf->regmap, reg, data, nr_regs); } EXPORT_SYMBOL_GPL(pcf50633_write_block); u8 pcf50633_reg_read(struct pcf50633 *pcf, u8 reg) { unsigned int val; int ret; ret = regmap_read(pcf->regmap, reg, &val); if (ret < 0) return -1; return val; } EXPORT_SYMBOL_GPL(pcf50633_reg_read); int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val) { return regmap_write(pcf->regmap, reg, val); } EXPORT_SYMBOL_GPL(pcf50633_reg_write); int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val) { return regmap_update_bits(pcf->regmap, reg, mask, val); } EXPORT_SYMBOL_GPL(pcf50633_reg_set_bit_mask); int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val) { return regmap_update_bits(pcf->regmap, reg, val, 0); } 
EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits); /* sysfs attributes */ static ssize_t show_dump_regs(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633 *pcf = dev_get_drvdata(dev); u8 dump[16]; int n, n1, idx = 0; char *buf1 = buf; static u8 address_no_read[] = { /* must be ascending */ PCF50633_REG_INT1, PCF50633_REG_INT2, PCF50633_REG_INT3, PCF50633_REG_INT4, PCF50633_REG_INT5, 0 /* terminator */ }; for (n = 0; n < 256; n += sizeof(dump)) { for (n1 = 0; n1 < sizeof(dump); n1++) if (n == address_no_read[idx]) { idx++; dump[n1] = 0x00; } else dump[n1] = pcf50633_reg_read(pcf, n + n1); hex_dump_to_buffer(dump, sizeof(dump), 16, 1, buf1, 128, 0); buf1 += strlen(buf1); *buf1++ = '\n'; *buf1 = '\0'; } return buf1 - buf; } static DEVICE_ATTR(dump_regs, 0400, show_dump_regs, NULL); static ssize_t show_resume_reason(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633 *pcf = dev_get_drvdata(dev); int n; n = sprintf(buf, "%02x%02x%02x%02x%02x\n", pcf->resume_reason[0], pcf->resume_reason[1], pcf->resume_reason[2], pcf->resume_reason[3], pcf->resume_reason[4]); return n; } static DEVICE_ATTR(resume_reason, 0400, show_resume_reason, NULL); static struct attribute *pcf_sysfs_entries[] = { &dev_attr_dump_regs.attr, &dev_attr_resume_reason.attr, NULL, }; static struct attribute_group pcf_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcf_sysfs_entries, }; static void pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, struct platform_device **pdev) { int ret; *pdev = platform_device_alloc(name, -1); if (!*pdev) { dev_err(pcf->dev, "Falied to allocate %s\n", name); return; } (*pdev)->dev.parent = pcf->dev; ret = platform_device_add(*pdev); if (ret) { dev_err(pcf->dev, "Failed to register %s: %d\n", name, ret); platform_device_put(*pdev); *pdev = NULL; } } #ifdef CONFIG_PM_SLEEP static int pcf50633_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct pcf50633 
*pcf = i2c_get_clientdata(client); return pcf50633_irq_suspend(pcf); } static int pcf50633_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct pcf50633 *pcf = i2c_get_clientdata(client); return pcf50633_irq_resume(pcf); } #endif static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume); static struct regmap_config pcf50633_regmap_config = { .reg_bits = 8, .val_bits = 8, }; static int __devinit pcf50633_probe(struct i2c_client *client, const struct i2c_device_id *ids) { struct pcf50633 *pcf; struct pcf50633_platform_data *pdata = client->dev.platform_data; int i, ret; int version, variant; if (!client->irq) { dev_err(&client->dev, "Missing IRQ\n"); return -ENOENT; } pcf = kzalloc(sizeof(*pcf), GFP_KERNEL); if (!pcf) return -ENOMEM; pcf->pdata = pdata; mutex_init(&pcf->lock); pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config); if (IS_ERR(pcf->regmap)) { ret = PTR_ERR(pcf->regmap); dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret); goto err_free; } i2c_set_clientdata(client, pcf); pcf->dev = &client->dev; version = pcf50633_reg_read(pcf, 0); variant = pcf50633_reg_read(pcf, 1); if (version < 0 || variant < 0) { dev_err(pcf->dev, "Unable to probe pcf50633\n"); ret = -ENODEV; goto err_regmap; } dev_info(pcf->dev, "Probed device version %d variant %d\n", version, variant); pcf50633_irq_init(pcf, client->irq); /* Create sub devices */ pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev); pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev); pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev); pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev); pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev); for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { struct platform_device *pdev; pdev = platform_device_alloc("pcf50633-regltr", i); if (!pdev) { dev_err(pcf->dev, "Cannot create regulator %d\n", i); continue; } pdev->dev.parent = 
pcf->dev; platform_device_add_data(pdev, &pdata->reg_init_data[i], sizeof(pdata->reg_init_data[i])); pcf->regulator_pdev[i] = pdev; platform_device_add(pdev); } ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group); if (ret) dev_err(pcf->dev, "error creating sysfs entries\n"); if (pdata->probe_done) pdata->probe_done(pcf); return 0; err_regmap: regmap_exit(pcf->regmap); err_free: kfree(pcf); return ret; } static int __devexit pcf50633_remove(struct i2c_client *client) { struct pcf50633 *pcf = i2c_get_clientdata(client); int i; sysfs_remove_group(&client->dev.kobj, &pcf_attr_group); pcf50633_irq_free(pcf); platform_device_unregister(pcf->input_pdev); platform_device_unregister(pcf->rtc_pdev); platform_device_unregister(pcf->mbc_pdev); platform_device_unregister(pcf->adc_pdev); platform_device_unregister(pcf->bl_pdev); for (i = 0; i < PCF50633_NUM_REGULATORS; i++) platform_device_unregister(pcf->regulator_pdev[i]); regmap_exit(pcf->regmap); kfree(pcf); return 0; } static const struct i2c_device_id pcf50633_id_table[] = { {"pcf50633", 0x73}, {/* end of list */} }; MODULE_DEVICE_TABLE(i2c, pcf50633_id_table); static struct i2c_driver pcf50633_driver = { .driver = { .name = "pcf50633", .pm = &pcf50633_pm, }, .id_table = pcf50633_id_table, .probe = pcf50633_probe, .remove = __devexit_p(pcf50633_remove), }; static int __init pcf50633_init(void) { return i2c_add_driver(&pcf50633_driver); } static void __exit pcf50633_exit(void) { i2c_del_driver(&pcf50633_driver); } MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU"); MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>"); MODULE_LICENSE("GPL"); subsys_initcall(pcf50633_init); module_exit(pcf50633_exit);
gpl-2.0
wwwhana/android_kernel_sony_wukong
drivers/net/ethernet/dec/tulip/uli526x.c
4805
48261
/* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "uli526x" #define DRV_VERSION "0.9.3" #define DRV_RELDATE "2005-7-29" #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/bitops.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/uaccess.h> /* Board/System/Debug information/definition ---------------- */ #define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ #define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/ #define ULI526X_IO_SIZE 0x100 #define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */ #define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */ #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) #define TX_BUF_ALLOC 0x600 #define RX_ALLOC_SIZE 0x620 #define ULI526X_RESET 1 #define CR0_DEFAULT 0 #define CR6_DEFAULT 0x22200000 #define CR7_DEFAULT 0x180c1 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ #define MAX_PACKET_SIZE 1514 #define ULI5261_MAX_MULTICAST 14 #define RX_COPY_SIZE 
100 #define MAX_CHECK_PACKET 0x8000 #define ULI526X_10MHF 0 #define ULI526X_100MHF 1 #define ULI526X_10MFD 4 #define ULI526X_100MFD 5 #define ULI526X_AUTO 8 #define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */ #define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */ #define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */ #define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */ #define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */ #define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */ #define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ #define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */ #define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */ #define ULI526X_DBUG(dbug_now, msg, value) \ do { \ if (uli526x_debug || (dbug_now)) \ pr_err("%s %lx\n", (msg), (long) (value)); \ } while (0) #define SHOW_MEDIA_TYPE(mode) \ pr_err("Change Speed to %sMhz %s duplex\n", \ mode & 1 ? "100" : "10", \ mode & 4 ? "full" : "half"); /* CR9 definition: SROM/MII */ #define CR9_SROM_READ 0x4800 #define CR9_SRCS 0x1 #define CR9_SRCLK 0x2 #define CR9_CRDOUT 0x8 #define SROM_DATA_0 0x0 #define SROM_DATA_1 0x4 #define PHY_DATA_1 0x20000 #define PHY_DATA_0 0x00000 #define MDCLKH 0x10000 #define PHY_POWER_DOWN 0x800 #define SROM_V41_CODE 0x14 #define SROM_CLK_WRITE(data, ioaddr) \ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ udelay(5); \ outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ udelay(5); \ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ udelay(5); /* Structure/enum declaration ------------------------------- */ struct tx_desc { __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ char *tx_buf_ptr; /* Data for us */ struct tx_desc *next_tx_desc; } __attribute__(( aligned(32) )); struct rx_desc { __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ struct sk_buff *rx_skb_ptr; /* Data for us */ struct rx_desc *next_rx_desc; } __attribute__(( aligned(32) )); struct uli526x_board_info { u32 chip_id; /* Chip vendor/Device ID */ struct 
net_device *next_dev; /* next device */ struct pci_dev *pdev; /* PCI device */ spinlock_t lock; long ioaddr; /* I/O base address */ u32 cr0_data; u32 cr5_data; u32 cr6_data; u32 cr7_data; u32 cr15_data; /* pointer for memory physical address */ dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ dma_addr_t first_tx_desc_dma; dma_addr_t first_rx_desc_dma; /* descriptor pointer */ unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ unsigned char *buf_pool_start; /* Tx buffer pool align dword */ unsigned char *desc_pool_ptr; /* descriptor pool memory */ struct tx_desc *first_tx_desc; struct tx_desc *tx_insert_ptr; struct tx_desc *tx_remove_ptr; struct rx_desc *first_rx_desc; struct rx_desc *rx_insert_ptr; struct rx_desc *rx_ready_ptr; /* packet come pointer */ unsigned long tx_packet_cnt; /* transmitted packet count */ unsigned long rx_avail_cnt; /* available rx descriptor count */ unsigned long interval_rx_cnt; /* rx packet count a callback time */ u16 dbug_cnt; u16 NIC_capability; /* NIC media capability */ u16 PHY_reg4; /* Saved Phyxcer register 4 value */ u8 media_mode; /* user specify media mode */ u8 op_mode; /* real work media mode */ u8 phy_addr; u8 link_failed; /* Ever link failed */ u8 wait_reset; /* Hardware failed, need to reset */ struct timer_list timer; /* Driver defined statistic counter */ unsigned long tx_fifo_underrun; unsigned long tx_loss_carrier; unsigned long tx_no_carrier; unsigned long tx_late_collision; unsigned long tx_excessive_collision; unsigned long tx_jabber_timeout; unsigned long reset_count; unsigned long reset_cr8; unsigned long reset_fatal; unsigned long reset_TXtimeout; /* NIC SROM data */ unsigned char srom[128]; u8 init; }; enum uli526x_offsets { DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, DCR10 = 0x50, DCR11 = 
0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, DCR15 = 0x78
};

/*
 * CR6 (operation mode register) bit definitions.
 * PM/PAM/PBF are used by the rx-filter path; RXSC/TXSC gate the
 * receive/transmit state machines; FDM selects full duplex.
 */
enum uli526x_CR6_bits {
        CR6_RXSC = 0x2,                 /* receive start/stop */
        CR6_PBF = 0x8,                  /* set together with CR6_PM for promiscuous mode */
        CR6_PM = 0x40,                  /* promiscuous mode */
        CR6_PAM = 0x80,                 /* pass all multicast */
        CR6_FDM = 0x200,                /* full duplex mode bit */
        CR6_TXSC = 0x2000,              /* transmit start/stop */
        CR6_STI = 0x100000,
        CR6_SFT = 0x200000,             /* enabled on Tx FIFO underrun (see uli526x_free_tx_pkt) */
        CR6_RXA = 0x40000000,
        CR6_NO_PURGE = 0x20000000
};

/* Global variable declaration ----------------------------- */

static int __devinitdata printed_version;
static const char version[] __devinitconst =
        "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
static u32 uli526x_cr6_user_set;        /* user CR6 override, OR-ed into cr6_data at open */

/* For module input parameters */
static int debug;
static u32 cr6set;
static int mode = 8;                    /* 8 => ULI526X_AUTO (see uli526x_init_module) */

/* function declaration ------------------------------------- */
static int uli526x_open(struct net_device *);
static netdev_tx_t uli526x_start_xmit(struct sk_buff *, struct net_device *);
static int uli526x_stop(struct net_device *);
static void uli526x_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(long, int);
static irqreturn_t uli526x_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
static void uli526x_descriptor_init(struct net_device *, unsigned long);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct net_device *, int);
static u16 phy_read(unsigned long, u8, u8, u32);
static u16 phy_readby_cr10(unsigned long, u8, u8);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_writeby_cr10(unsigned long, u8, u8, u16);
static void phy_write_1bit(unsigned long, u32, u32);
static u16 phy_read_1bit(unsigned long, u32);
static u8 uli526x_sense_speed(struct uli526x_board_info *);
static void uli526x_process_mode(struct uli526x_board_info *);
static void uli526x_timer(unsigned long);
static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
static void uli526x_dynamic_reset(struct net_device *);
static void uli526x_free_rxbuffer(struct uli526x_board_info *);
static void uli526x_init(struct net_device *);
static void uli526x_set_phyxcer(struct uli526x_board_info *);

/* ULI526X network board routine ---------------------------- */

static const struct net_device_ops netdev_ops = {
        .ndo_open               = uli526x_open,
        .ndo_stop               = uli526x_stop,
        .ndo_start_xmit         = uli526x_start_xmit,
        .ndo_set_rx_mode        = uli526x_set_filter_mode,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = uli526x_poll,
#endif
};

/*
 *      Search ULI526X board, allocate space and register it
 */

static int __devinit uli526x_init_one (struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct uli526x_board_info *db;  /* board information structure */
        struct net_device *dev;
        int i, err;

        ULI526X_DBUG(0, "uli526x_init_one()", 0);

        if (!printed_version++)
                pr_info("%s\n", version);

        /* Init network device */
        dev = alloc_etherdev(sizeof(*db));
        if (dev == NULL)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                pr_warn("32-bit PCI DMA not available\n");
                err = -ENODEV;
                goto err_out_free;
        }

        /* Enable Master/IO access, Disable memory access */
        err = pci_enable_device(pdev);
        if (err)
                goto err_out_free;

        if (!pci_resource_start(pdev, 0)) {
                pr_err("I/O base is zero\n");
                err = -ENODEV;
                goto err_out_disable;
        }

        if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
                pr_err("Allocated I/O size too small\n");
                err = -ENODEV;
                goto err_out_disable;
        }

        if (pci_request_regions(pdev, DRV_NAME)) {
                pr_err("Failed to request PCI regions\n");
                err = -ENODEV;
                goto err_out_disable;
        }

        /* Init system & device */
        db = netdev_priv(dev);

        /* Allocate Tx/Rx descriptor memory (one coherent pool for both
         * rings plus alignment slack, see uli526x_descriptor_init) */
        db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
        if(db->desc_pool_ptr == NULL)
        {
                err = -ENOMEM;
                goto err_out_nomem;
        }
        db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
        if(db->buf_pool_ptr == NULL)
        {
                err = -ENOMEM;
                goto err_out_nomem;
        }

        db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
        db->first_tx_desc_dma = db->desc_pool_dma_ptr;
        db->buf_pool_start = db->buf_pool_ptr;
        db->buf_pool_dma_start = db->buf_pool_dma_ptr;

        db->chip_id = ent->driver_data;
        db->ioaddr = pci_resource_start(pdev, 0);

        db->pdev = pdev;
        db->init = 1;

        dev->base_addr = db->ioaddr;
        dev->irq = pdev->irq;
        pci_set_drvdata(pdev, dev);

        /* Register some necessary functions */
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;

        spin_lock_init(&db->lock);

        /* read 64 word srom data */
        for (i = 0; i < 64; i++)
                ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));

        /* Set Node address */
        if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)               /* SROM absent, so read MAC address from ID Table */
        {
                outl(0x10000, db->ioaddr + DCR0);       //Diagnosis mode
                outl(0x1c0, db->ioaddr + DCR13);        //Reset diagnostic pointer port
                outl(0, db->ioaddr + DCR14);            //Clear reset port
                outl(0x10, db->ioaddr + DCR14);         //Reset ID Table pointer
                outl(0, db->ioaddr + DCR14);            //Clear reset port
                outl(0, db->ioaddr + DCR13);            //Clear CR13
                outl(0x1b0, db->ioaddr + DCR13);        //Select ID Table access port
                //Read MAC address from CR14
                /* NOTE(review): dev_addr is u8[], so only the low byte of each
                 * inl() is kept — presumably CR14 yields one address byte per
                 * read in this mode; confirm against the M526x datasheet. */
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = inl(db->ioaddr + DCR14);
                //Read end
                outl(0, db->ioaddr + DCR13);    //Clear CR13
                outl(0, db->ioaddr + DCR0);     //Clear CR0
                udelay(10);
        }
        else            /*Exist SROM*/
        {
                for (i = 0; i < 6; i++)
                        dev->dev_addr[i] = db->srom[20 + i];
        }
        err = register_netdev (dev);
        if (err)
                goto err_out_res;

        netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
                    ent->driver_data >> 16, pci_name(pdev),
                    dev->dev_addr, dev->irq);

        pci_set_master(pdev);

        return 0;

err_out_res:
        pci_release_regions(pdev);
err_out_nomem:
        /* netdev_priv() memory is zeroed by alloc_etherdev(), so these
         * pointer checks are safe even if the first allocation failed */
        if(db->desc_pool_ptr)
                pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
                        db->desc_pool_ptr, db->desc_pool_dma_ptr);

        if(db->buf_pool_ptr != NULL)
                pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_disable:
        pci_disable_device(pdev);
err_out_free:
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return err;
}


static void __devexit uli526x_remove_one (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct uli526x_board_info *db = netdev_priv(dev);

        ULI526X_DBUG(0, "uli526x_remove_one()", 0);

        /* NOTE(review): the DMA pools are released before
         * unregister_netdev(); if the interface is still up at remove time
         * the stop path runs after the rings are freed — verify ordering. */
        pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
                                DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
                                db->desc_pool_dma_ptr);
        pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
                                db->buf_pool_ptr, db->buf_pool_dma_ptr);
        unregister_netdev(dev);
        pci_release_regions(pdev);
        free_netdev(dev);       /* free board information */
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
}


/*
 *      Open the interface.
 *      The interface is opened whenever "ifconfig" activates it.
 */

static int uli526x_open(struct net_device *dev)
{
        int ret;
        struct uli526x_board_info *db = netdev_priv(dev);

        ULI526X_DBUG(0, "uli526x_open", 0);

        /* system variable init */
        db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
        db->tx_packet_cnt = 0;
        db->rx_avail_cnt = 0;
        db->link_failed = 1;
        netif_carrier_off(dev);
        db->wait_reset = 0;

        db->NIC_capability = 0xf;       /* All capability */
        db->PHY_reg4 = 0x1e0;           /* advertise 10/100 half+full (MII reg4 bits) */

        /* CR6 operation mode decision */
        db->cr6_data |= ULI526X_TXTH_256;
        db->cr0_data = CR0_DEFAULT;

        /* Initialize ULI526X board */
        uli526x_init(dev);

        /* NOTE(review): if request_irq() fails here the hardware has already
         * been initialized and Tx/Rx enabled by uli526x_init(); the error
         * path does not undo that — confirm whether a reset is needed. */
        ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
        if (ret)
                return ret;

        /* Active System Interface */
        netif_wake_queue(dev);

        /* set and active a timer process */
        init_timer(&db->timer);
        db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
        db->timer.data = (unsigned long)dev;
        db->timer.function = uli526x_timer;
        add_timer(&db->timer);

        return 0;
}


/*      Initialize ULI526X board
 *      Reset ULI526X board
 *      Initialize TX/Rx descriptor chain structure
 *      Send the set-up frame
 *      Enable Tx/Rx machine
 */

static void uli526x_init(struct net_device *dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long ioaddr = db->ioaddr;
        u8 phy_tmp;
        u8 timeout;
        u16 phy_value;
        u16 phy_reg_reset;


        ULI526X_DBUG(0, "uli526x_init()", 0);

        /* Reset M526x MAC controller */
        outl(ULI526X_RESET, ioaddr + DCR0);     /* RESET MAC */
        udelay(100);
        outl(db->cr0_data, ioaddr + DCR0);
        udelay(5);

        /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
        /* Probe all 32 MII addresses; a readable, non-0/non-0xffff PHY ID
         * register (reg 3) marks the attached PHY. */
        db->phy_addr = 1;
        for(phy_tmp=0;phy_tmp<32;phy_tmp++)
        {
                phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
                if(phy_value != 0xffff&&phy_value!=0)
                {
                        db->phy_addr = phy_tmp;
                        break;
                }
        }
        if(phy_tmp == 32)
                pr_warn("Can not find the phy address!!!\n");
        /* Parser SROM and media mode */
        db->media_mode = uli526x_media_mode;

        /* phyxcer capability setting */
        phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
        phy_reg_reset = (phy_reg_reset | 0x8000);       /* BMCR reset bit */
        phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);

        /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
         * functions") or phy data sheet for details on phy reset
         */
        udelay(500);
        timeout = 10;
        /* poll BMCR until the self-clearing reset bit drops (bounded wait) */
        while (timeout-- && phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
                udelay(100);

        /* Process Phyxcer Media Mode */
        uli526x_set_phyxcer(db);

        /* Media Mode Process */
        if ( !(db->media_mode & ULI526X_AUTO) )
                db->op_mode = db->media_mode;           /* Force Mode */

        /* Initialize Transmit/Receive descriptor and CR3/4 */
        uli526x_descriptor_init(dev, ioaddr);

        /* Init CR6 to program M526X operation */
        update_cr6(db->cr6_data, ioaddr);

        /* Send setup frame */
        send_filter_frame(dev, netdev_mc_count(dev));   /* M5261/M5263 */

        /* Init CR7, interrupt active bit */
        db->cr7_data = CR7_DEFAULT;
        outl(db->cr7_data, ioaddr + DCR7);

        /* Init CR15, Tx jabber and Rx watchdog timer */
        outl(db->cr15_data, ioaddr + DCR15);

        /* Enable ULI526X Tx/Rx function */
        db->cr6_data |= CR6_RXSC | CR6_TXSC;
        update_cr6(db->cr6_data, ioaddr);
}


/*
 *      Hardware start transmission.
 *      Send a packet to media from the upper layer.
 */

static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        struct tx_desc *txptr;
        unsigned long flags;

        ULI526X_DBUG(0, "uli526x_start_xmit", 0);

        /* Resource flag check: stop the queue pessimistically, re-wake
         * below once we know descriptors remain */
        netif_stop_queue(dev);

        /* Too large packet check */
        if (skb->len > MAX_PACKET_SIZE) {
                netdev_err(dev, "big packet = %d\n", (u16)skb->len);
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&db->lock, flags);

        /* No Tx resource check, it should never happen normally */
        if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
                spin_unlock_irqrestore(&db->lock, flags);
                netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
                return NETDEV_TX_BUSY;
        }

        /* Disable NIC interrupt */
        outl(0, dev->base_addr + DCR7);

        /* transmit this packet: copy into the descriptor's preallocated
         * DMA buffer (bounce-buffer scheme, no per-skb mapping) */
        txptr = db->tx_insert_ptr;
        skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
        txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

        /* Point to next transmit free descriptor */
        db->tx_insert_ptr = txptr->next_tx_desc;

        /* Transmit Packet Process */
        if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
                txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
                db->tx_packet_cnt++;                    /* Ready to send */
                outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
                dev->trans_start = jiffies;             /* saved time stamp */
        }

        /* Tx resource check */
        if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
                netif_wake_queue(dev);

        /* Restore CR7 to enable interrupt */
        spin_unlock_irqrestore(&db->lock, flags);
        outl(db->cr7_data, dev->base_addr + DCR7);

        /* free this SKB */
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}


/*
 *      Stop the interface.
 *      The interface is stopped when it is brought down.
 */

static int uli526x_stop(struct net_device *dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long ioaddr = dev->base_addr;

        ULI526X_DBUG(0, "uli526x_stop", 0);

        /* disable system */
        netif_stop_queue(dev);

        /* delete timer */
        del_timer_sync(&db->timer);

        /* Reset & stop ULI526X board */
        outl(ULI526X_RESET, ioaddr + DCR0);
        udelay(5);
        phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);    /* reset PHY too */

        /* free interrupt */
        free_irq(dev->irq, dev);

        /* free allocated rx buffer */
        uli526x_free_rxbuffer(db);

        return 0;
}


/*
 *      M5261/M5263 interrupt handler
 *      receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long ioaddr = dev->base_addr;
        unsigned long flags;

        spin_lock_irqsave(&db->lock, flags);
        outl(0, ioaddr + DCR7);         /* mask chip interrupts while handling */

        /* Got ULI526X status; write it back to ack/clear the events */
        db->cr5_data = inl(ioaddr + DCR5);
        outl(db->cr5_data, ioaddr + DCR5);
        if ( !(db->cr5_data & 0x180c1) ) {
                /* none of the events we service — restore CR7 interrupt mask */
                outl(db->cr7_data, ioaddr + DCR7);
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }

        /* Check system status */
        if (db->cr5_data & 0x2000) {
                /* system bus error happen */
                ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
                db->reset_fatal++;
                db->wait_reset = 1;     /* Need to RESET (done by the timer) */
                spin_unlock_irqrestore(&db->lock, flags);
                return IRQ_HANDLED;
        }

        /* Received the coming packet */
        if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
                uli526x_rx_packet(dev, db);

        /* reallocate rx descriptor buffer */
        if (db->rx_avail_cnt<RX_DESC_CNT)
                allocate_rx_buffer(dev);

        /* Free the transmitted descriptor */
        if ( db->cr5_data & 0x01)
                uli526x_free_tx_pkt(dev, db);

        /* Restore CR7 to enable interrupt mask */
        outl(db->cr7_data, ioaddr + DCR7);

        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev)
{
        /* ISR grabs the irqsave lock, so this should be safe */
        uli526x_interrupt(dev->irq, dev);
}
#endif

/*
 *      Free TX resource after TX complete
 */

static void uli526x_free_tx_pkt(struct net_device *dev,
                                struct uli526x_board_info * db)
{
        struct tx_desc *txptr;
        u32 tdes0;

        txptr = db->tx_remove_ptr;
        while(db->tx_packet_cnt) {
                tdes0 = le32_to_cpu(txptr->tdes0);
                if (tdes0 & 0x80000000)
                        break;          /* still owned by the chip */

                /* A packet sent completed */
                db->tx_packet_cnt--;
                dev->stats.tx_packets++;

                /* Transmit statistic counter */
                if ( tdes0 != 0x7fffffff ) {
                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
                        if (tdes0 & TDES0_ERR_MASK) {
                                dev->stats.tx_errors++;
                                if (tdes0 & 0x0002) {   /* UnderRun */
                                        db->tx_fifo_underrun++;
                                        /* raise Tx threshold once on underrun */
                                        if ( !(db->cr6_data & CR6_SFT) ) {
                                                db->cr6_data = db->cr6_data | CR6_SFT;
                                                update_cr6(db->cr6_data, db->ioaddr);
                                        }
                                }
                                if (tdes0 & 0x0100)
                                        db->tx_excessive_collision++;
                                if (tdes0 & 0x0200)
                                        db->tx_late_collision++;
                                if (tdes0 & 0x0400)
                                        db->tx_no_carrier++;
                                if (tdes0 & 0x0800)
                                        db->tx_loss_carrier++;
                                if (tdes0 & 0x4000)
                                        db->tx_jabber_timeout++;
                        }
                }

                txptr = txptr->next_tx_desc;
        }/* End of while */

        /* Update TX remove pointer to next */
        db->tx_remove_ptr = txptr;

        /* Resource available check */
        if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
                netif_wake_queue(dev);  /* Active upper layer, send again */
}


/*
 *      Receive the come packet and pass to upper layer
 */

static void uli526x_rx_packet(struct net_device *dev,
                              struct uli526x_board_info * db)
{
        struct rx_desc *rxptr;
        struct sk_buff *skb;
        int rxlen;
        u32 rdes0;

        rxptr = db->rx_ready_ptr;

        while(db->rx_avail_cnt) {
                rdes0 = le32_to_cpu(rxptr->rdes0);
                if (rdes0 & 0x80000000) /* packet owner check */
                {
                        break;
                }

                db->rx_avail_cnt--;
                db->interval_rx_cnt++;

                pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
                if ( (rdes0 & 0x300) != 0x300) {
                        /* A packet without First/Last flag */
                        /* reuse this SKB */
                        ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                        uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
                } else {
                        /* A packet with First/Last flag */
                        rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;  /* strip CRC */

                        /* error summary bit check */
                        if (rdes0 & 0x8000) {
                                /* This is a error packet */
                                dev->stats.rx_errors++;
                                if (rdes0 & 1)
                                        dev->stats.rx_fifo_errors++;
                                if (rdes0 & 2)
                                        dev->stats.rx_crc_errors++;
                                if (rdes0 & 0x80)
                                        dev->stats.rx_length_errors++;
                        }

                        if ( !(rdes0 & 0x8000) ||
                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
                                struct sk_buff *new_skb = NULL;

                                skb = rxptr->rx_skb_ptr;

                                /* Good packet, send to upper layer */
                                /* Short packets are copied into a fresh small SKB
                                 * so the full-size ring buffer can be reused */
                                if ((rxlen < RX_COPY_SIZE) &&
                                    (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) {
                                        skb = new_skb;
                                        /* size less than COPY_SIZE, allocate a rxlen SKB */
                                        skb_reserve(skb, 2); /* 16byte align */
                                        /* NOTE(review): copies from skb_tail_pointer()
                                         * of the ring skb — assumes no data was pushed
                                         * onto it since allocation; verify. */
                                        memcpy(skb_put(skb, rxlen),
                                               skb_tail_pointer(rxptr->rx_skb_ptr),
                                               rxlen);
                                        uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
                                } else
                                        skb_put(skb, rxlen);

                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += rxlen;

                        } else {
                                /* Reuse SKB buffer when the packet is error */
                                ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                                uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
                        }
                }

                rxptr = rxptr->next_rx_desc;
        }

        db->rx_ready_ptr = rxptr;
}


/*
 * Set ULI526X multicast address
 */

static void uli526x_set_filter_mode(struct net_device * dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long flags;

        ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
        spin_lock_irqsave(&db->lock, flags);

        if (dev->flags & IFF_PROMISC) {
                ULI526X_DBUG(0, "Enable PROM Mode", 0);
                db->cr6_data |= CR6_PM | CR6_PBF;
                update_cr6(db->cr6_data, db->ioaddr);
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        if (dev->flags & IFF_ALLMULTI ||
            netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
                ULI526X_DBUG(0, "Pass all multicast address",
                             netdev_mc_count(dev));
                db->cr6_data &= ~(CR6_PM | CR6_PBF);
                db->cr6_data |= CR6_PAM;
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
        send_filter_frame(dev, netdev_mc_count(dev));   /* M5261/M5263 */
        spin_unlock_irqrestore(&db->lock, flags);
}

/* Fill an ethtool_cmd from the driver's current media state */
static void
ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
{
        ecmd->supported = (SUPPORTED_10baseT_Half |
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_Autoneg |
                           SUPPORTED_MII);

        ecmd->advertising = (ADVERTISED_10baseT_Half |
                           ADVERTISED_10baseT_Full |
                           ADVERTISED_100baseT_Half |
                           ADVERTISED_100baseT_Full |
                           ADVERTISED_Autoneg |
                           ADVERTISED_MII);


        ecmd->port = PORT_MII;
        ecmd->phy_address = db->phy_addr;

        ecmd->transceiver = XCVR_EXTERNAL;

        ethtool_cmd_speed_set(ecmd, SPEED_10);
        ecmd->duplex = DUPLEX_HALF;

        if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
        {
                ethtool_cmd_speed_set(ecmd, SPEED_100);
        }
        if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
        {
                ecmd->duplex = DUPLEX_FULL;
        }
        if(db->link_failed)
        {
                /* -1 = SPEED_UNKNOWN / DUPLEX_UNKNOWN */
                ethtool_cmd_speed_set(ecmd, -1);
                ecmd->duplex = -1;
        }

        if (db->media_mode & ULI526X_AUTO)
        {
                ecmd->autoneg = AUTONEG_ENABLE;
        }
}

static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        struct uli526x_board_info *np = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (np->pdev)
                strlcpy(info->bus_info, pci_name(np->pdev),
                        sizeof(info->bus_info));
        else
                sprintf(info->bus_info, "EISA 0x%lx %d",
                        dev->base_addr, dev->irq);
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct uli526x_board_info *np = netdev_priv(dev);

        ULi_ethtool_gset(np, cmd);

        return 0;
}

static u32 netdev_get_link(struct net_device *dev)
{
        struct uli526x_board_info *np = netdev_priv(dev);

        if(np->link_failed)
                return 0;
        else
                return 1;
}

static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_PHY | WAKE_MAGIC;
        wol->wolopts = 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_settings           = netdev_get_settings,
        .get_link               = netdev_get_link,
        .get_wol                = uli526x_get_wol,
};

/*
 *      A periodic timer routine
 *      Dynamic media sense, allocate Rx buffer...
 */

static void uli526x_timer(unsigned long data)
{
        u32 tmp_cr8;
        unsigned char tmp_cr12=0;
        struct net_device *dev = (struct net_device *) data;
        struct uli526x_board_info *db = netdev_priv(dev);
        unsigned long flags;

        //ULI526X_DBUG(0, "uli526x_timer()", 0);
        spin_lock_irqsave(&db->lock, flags);


        /* Dynamic reset ULI526X : system error or transmit time-out */
        tmp_cr8 = inl(db->ioaddr + DCR8);
        if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
                db->reset_cr8++;
                db->wait_reset = 1;
        }
        db->interval_rx_cnt = 0;

        /* TX polling kick monitor */
        if ( db->tx_packet_cnt &&
             time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
                outl(0x1, dev->base_addr + DCR1);   // Tx polling again

                // TX Timeout
                if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
                        db->reset_TXtimeout++;
                        db->wait_reset = 1;
                        netdev_err(dev, " Tx timeout - resetting\n");
                }
        }

        if (db->wait_reset) {
                ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
                db->reset_count++;
                uli526x_dynamic_reset(dev);
                db->timer.expires = ULI526X_TIMER_WUT;
                add_timer(&db->timer);
                spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        /* Link status check, Dynamic media type change */
        /* MII reg 5 (link partner ability) bits 0x01e0 set => partner
         * advertises something, treated here as "link present" */
        if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
                tmp_cr12 = 3;

        if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
                /* Link Failed */
                ULI526X_DBUG(0, "Link Failed", tmp_cr12);
                netif_carrier_off(dev);
                netdev_info(dev, "NIC Link is Down\n");
                db->link_failed = 1;

                /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
                /* AUTO don't need */
                if ( !(db->media_mode & 0x8) )
                        phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);

                /* AUTO mode, if INT phyxcer link failed, select EXT device */
                if (db->media_mode & ULI526X_AUTO) {
                        db->cr6_data&=~0x00000200;      /* bit9=0, HD mode */
                        update_cr6(db->cr6_data, db->ioaddr);
                }
        } else if ((tmp_cr12 & 0x3) && db->link_failed) {
                ULI526X_DBUG(0, "Link link OK", tmp_cr12);
                db->link_failed = 0;

                /* Auto Sense Speed */
                if ( (db->media_mode & ULI526X_AUTO) &&
                     uli526x_sense_speed(db) )
                        db->link_failed = 1;
                uli526x_process_mode(db);

                if(db->link_failed==0)
                {
                        netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
                                    (db->op_mode == ULI526X_100MHF ||
                                     db->op_mode == ULI526X_100MFD) ? 100 : 10,
                                    (db->op_mode == ULI526X_10MFD ||
                                     db->op_mode == ULI526X_100MFD) ? "Full" : "Half");
                        netif_carrier_on(dev);
                }
                /* SHOW_MEDIA_TYPE(db->op_mode); */
        }
        else if(!(tmp_cr12 & 0x3) && db->link_failed)
        {
                /* first pass after open/reset: report initial down state once */
                if(db->init==1)
                {
                        netdev_info(dev, "NIC Link is Down\n");
                        netif_carrier_off(dev);
                }
        }
        db->init=0;

        /* Timer active again */
        db->timer.expires = ULI526X_TIMER_WUT;
        add_timer(&db->timer);
        spin_unlock_irqrestore(&db->lock, flags);
}


/*
 *      Stop ULI526X board
 *      Free Tx/Rx allocated memory
 *      Init system variable
 */

static void uli526x_reset_prepare(struct net_device *dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);

        /* Stop MAC controller */
        db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
        update_cr6(db->cr6_data, dev->base_addr);
        outl(0, dev->base_addr + DCR7);         /* Disable Interrupt */
        outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

        /* Disable upper layer interface */
        netif_stop_queue(dev);

        /* Free Rx Allocate buffer */
        uli526x_free_rxbuffer(db);

        /* system variable init */
        db->tx_packet_cnt = 0;
        db->rx_avail_cnt = 0;
        db->link_failed = 1;
        db->init=1;
        db->wait_reset = 0;
}


/*
 *      Dynamic reset the ULI526X board
 *      Stop ULI526X board
 *      Free Tx/Rx allocated memory
 *      Reset ULI526X board
 *      Re-initialize ULI526X board
 */

static void uli526x_dynamic_reset(struct net_device *dev)
{
        ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);

        uli526x_reset_prepare(dev);

        /* Re-initialize ULI526X board */
        uli526x_init(dev);

        /* Restart upper layer interface */
        netif_wake_queue(dev);
}


#ifdef CONFIG_PM

/*
 *      Suspend the interface.
 */

static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        pci_power_t power_state;
        int err;

        ULI526X_DBUG(0, "uli526x_suspend", 0);

        /* NOTE(review): netdev_priv() never returns NULL for a valid dev;
         * this check is presumably guarding a NULL drvdata — verify. */
        if (!netdev_priv(dev))
                return 0;

        pci_save_state(pdev);

        if (!netif_running(dev))
                return 0;

        netif_device_detach(dev);
        uli526x_reset_prepare(dev);

        power_state = pci_choose_state(pdev, state);
        pci_enable_wake(pdev, power_state, 0);
        err = pci_set_power_state(pdev, power_state);
        if (err) {
                /* could not enter the low-power state: bring the device back up */
                netif_device_attach(dev);
                /* Re-initialize ULI526X board */
                uli526x_init(dev);
                /* Restart upper layer interface */
                netif_wake_queue(dev);
        }

        return err;
}

/*
 *      Resume the interface.
 */

static int uli526x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        int err;

        ULI526X_DBUG(0, "uli526x_resume", 0);

        if (!netdev_priv(dev))
                return 0;

        pci_restore_state(pdev);

        if (!netif_running(dev))
                return 0;

        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                netdev_warn(dev, "Could not put device into D0\n");
                return err;
        }

        netif_device_attach(dev);
        /* Re-initialize ULI526X board */
        uli526x_init(dev);
        /* Restart upper layer interface */
        netif_wake_queue(dev);

        return 0;
}

#else /* !CONFIG_PM */

#define uli526x_suspend NULL
#define uli526x_resume  NULL

#endif /* !CONFIG_PM */


/*
 *      free all allocated rx buffer
 */

static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
{
        ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);

        /* free allocated rx buffer */
        while (db->rx_avail_cnt) {
                dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
                db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
                db->rx_avail_cnt--;
        }
}


/*
 *      Reuse the SK buffer: re-arm an rx descriptor with the same skb
 */

static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
{
        struct rx_desc *rxptr = db->rx_insert_ptr;

        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
                rxptr->rx_skb_ptr = skb;
                rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
                        skb_tail_pointer(skb), RX_ALLOC_SIZE,
                        PCI_DMA_FROMDEVICE));
                wmb();  /* descriptor fields visible before ownership flips */
                rxptr->rdes0 = cpu_to_le32(0x80000000); /* give to the chip */
                db->rx_avail_cnt++;
                db->rx_insert_ptr = rxptr->next_rx_desc;
        } else
                ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}


/*
 *      Initialize transmit/Receive descriptor
 *      Using Chain structure, and allocate Tx/Rx buffer
 */

static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        struct tx_desc *tmp_tx;
        struct rx_desc *tmp_rx;
        unsigned char *tmp_buf;
        dma_addr_t tmp_tx_dma, tmp_rx_dma;
        dma_addr_t tmp_buf_dma;
        int i;

        ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);

        /* tx descriptor start pointer */
        db->tx_insert_ptr = db->first_tx_desc;
        db->tx_remove_ptr = db->first_tx_desc;
        outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

        /* rx descriptor start pointer: rx ring follows tx ring in the
         * same coherent pool */
        db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
        db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
        db->rx_insert_ptr = db->first_rx_desc;
        db->rx_ready_ptr = db->first_rx_desc;
        outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */

        /* Init Transmit chain */
        tmp_buf = db->buf_pool_start;
        tmp_buf_dma = db->buf_pool_dma_start;
        tmp_tx_dma = db->first_tx_desc_dma;
        for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
                tmp_tx->tx_buf_ptr = tmp_buf;
                tmp_tx->tdes0 = cpu_to_le32(0);
                tmp_tx->tdes1 = cpu_to_le32(0x81000000);        /* IC, chain */
                tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
                tmp_tx_dma += sizeof(struct tx_desc);
                tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);        /* link to next desc */
                tmp_tx->next_tx_desc = tmp_tx + 1;
                tmp_buf = tmp_buf + TX_BUF_ALLOC;
                tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
        }
        /* close the ring: last descriptor points back to the first */
        (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
        tmp_tx->next_tx_desc = db->first_tx_desc;

        /* Init Receive descriptor chain (buffers attached later by
         * allocate_rx_buffer) */
        tmp_rx_dma=db->first_rx_desc_dma;
        for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
                tmp_rx->rdes0 = cpu_to_le32(0);
                tmp_rx->rdes1 = cpu_to_le32(0x01000600);
                tmp_rx_dma += sizeof(struct rx_desc);
                tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
                tmp_rx->next_rx_desc = tmp_rx + 1;
        }
        (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
        tmp_rx->next_rx_desc = db->first_rx_desc;

        /* pre-allocate Rx buffer */
        allocate_rx_buffer(dev);
}


/*
 *      Update CR6 value
 *      Firstly stop ULI526X, then written value and start
 */

static void update_cr6(u32 cr6_data, unsigned long ioaddr)
{
        outl(cr6_data, ioaddr + DCR6);
        udelay(5);
}


/*
 *      Send a setup frame for M5261/M5263
 *      This setup frame initialize ULI526X address filter mode
 */

#ifdef __BIG_ENDIAN
#define FLT_SHIFT 16
#else
#define FLT_SHIFT 0
#endif

static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        struct tx_desc *txptr;
        u16 * addrptr;
        u32 * suptr;
        int i;

        ULI526X_DBUG(0, "send_filter_frame()", 0);

        txptr = db->tx_insert_ptr;
        suptr = (u32 *) txptr->tx_buf_ptr;

        /* Node address */
        addrptr = (u16 *) dev->dev_addr;
        *suptr++ = addrptr[0] << FLT_SHIFT;
        *suptr++ = addrptr[1] << FLT_SHIFT;
        *suptr++ = addrptr[2] << FLT_SHIFT;

        /* broadcast address */
        *suptr++ = 0xffff << FLT_SHIFT;
        *suptr++ = 0xffff << FLT_SHIFT;
        *suptr++ = 0xffff << FLT_SHIFT;

        /* fit the multicast address */
        netdev_for_each_mc_addr(ha, dev) {
                addrptr = (u16 *) ha->addr;
                *suptr++ = addrptr[0] << FLT_SHIFT;
                *suptr++ = addrptr[1] << FLT_SHIFT;
                *suptr++ = addrptr[2] << FLT_SHIFT;
        }

        /* pad the remaining filter slots with the broadcast pattern
         * (setup frame always carries 16 addresses: node + bcast + 14) */
        for (i = netdev_mc_count(dev); i < 14; i++) {
                *suptr++ = 0xffff << FLT_SHIFT;
                *suptr++ = 0xffff << FLT_SHIFT;
                *suptr++ = 0xffff << FLT_SHIFT;
        }

        /* prepare the setup frame */
        db->tx_insert_ptr = txptr->next_tx_desc;
        txptr->tdes1 = cpu_to_le32(0x890000c0);

        /* Resource Check and Send the setup packet */
        if (db->tx_packet_cnt < TX_DESC_CNT) {
                /* Resource Empty */
                db->tx_packet_cnt++;
                txptr->tdes0 = cpu_to_le32(0x80000000);
                update_cr6(db->cr6_data | 0x2000, dev->base_addr);
                outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
                update_cr6(db->cr6_data, dev->base_addr);
                dev->trans_start = jiffies;
        } else
                netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}


/*
 *      Allocate rx buffer,
 *      As possible as allocate maximum Rx buffer
 */

static void allocate_rx_buffer(struct net_device *dev)
{
        struct uli526x_board_info *db = netdev_priv(dev);
        struct rx_desc *rxptr;
        struct sk_buff *skb;

        rxptr = db->rx_insert_ptr;

        while(db->rx_avail_cnt < RX_DESC_CNT) {
                skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
                if (skb == NULL)
                        break;          /* retried from the interrupt path later */
                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
                rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
                        skb_tail_pointer(skb), RX_ALLOC_SIZE,
                        PCI_DMA_FROMDEVICE));
                wmb();  /* publish rdes2 before handing ownership to the chip */
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                rxptr = rxptr->next_rx_desc;
                db->rx_avail_cnt++;
        }

        db->rx_insert_ptr = rxptr;
}


/*
 *      Read one word data from the serial ROM
 */

static u16 read_srom_word(long ioaddr, int offset)
{
        int i;
        u16 srom_data = 0;
        long cr9_ioaddr = ioaddr + DCR9;

        outl(CR9_SROM_READ, cr9_ioaddr);
        outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

        /* Send the Read Command 110b */
        SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
        SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
        SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

        /* Send the offset, MSB first (6 address bits) */
        for (i = 5; i >= 0; i--) {
                srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
                SROM_CLK_WRITE(srom_data, cr9_ioaddr);
        }

        outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

        /* clock out 16 data bits, MSB first */
        for (i = 16; i > 0; i--) {
                outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
                udelay(5);
                srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
                outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
                udelay(5);
        }

        outl(CR9_SROM_READ, cr9_ioaddr);
        return srom_data;
}


/*
 *      Auto sense the media mode
 */

static u8 uli526x_sense_speed(struct uli526x_board_info * db)
{
        u8 ErrFlag = 0;
        u16 phy_mode;

        /* BMSR is read twice: the link status bit is latched-low, so the
         * first read clears a stale latch and the second is current */
        phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
        phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

        if ( (phy_mode & 0x24) == 0x24 ) {
                /* link up and autoneg complete: pick the highest common
                 * ability from the link partner register (reg 5) */
                phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
                if(phy_mode&0x8000)
                        phy_mode = 0x8000;
                else if(phy_mode&0x4000)
                        phy_mode = 0x4000;
                else if(phy_mode&0x2000)
                        phy_mode = 0x2000;
                else
                        phy_mode = 0x1000;

                switch (phy_mode) {
                case 0x1000: db->op_mode = ULI526X_10MHF; break;
                case 0x2000: db->op_mode = ULI526X_10MFD; break;
                case 0x4000: db->op_mode = ULI526X_100MHF; break;
                case 0x8000: db->op_mode = ULI526X_100MFD; break;
                default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
                }
        } else {
                db->op_mode = ULI526X_10MHF;
                ULI526X_DBUG(0, "Link Failed :", phy_mode);
                ErrFlag = 1;
        }

        return ErrFlag;
}


/*
 *      Set 10/100 phyxcer capability
 *      AUTO mode : phyxcer register4 is NIC capability
 *      Force mode: phyxcer register4 is the force media
 */

static void uli526x_set_phyxcer(struct uli526x_board_info *db)
{
        u16 phy_reg;

        /* Phyxcer capability setting */
        phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

        if (db->media_mode & ULI526X_AUTO) {
                /* AUTO Mode */
                phy_reg |= db->PHY_reg4;
        } else {
                /* Force Mode */
                switch(db->media_mode) {
                case ULI526X_10MHF: phy_reg |= 0x20; break;
                case ULI526X_10MFD: phy_reg |= 0x40; break;
                case ULI526X_100MHF: phy_reg |= 0x80; break;
                case ULI526X_100MFD: phy_reg |= 0x100; break;
                }

        }

        /* Write new capability to Phyxcer Reg4; if nothing was selected,
         * fall back to advertising everything and force AUTO mode */
        if ( !(phy_reg & 0x01e0)) {
                phy_reg|=db->PHY_reg4;
                db->media_mode|=ULI526X_AUTO;
        }
        phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

        /* Restart Auto-Negotiation */
        phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
        udelay(50);
}


/*
 *      Process op-mode
 *      AUTO mode : PHY
controller in Auto-negotiation Mode
 *      Force mode:     PHY controller in force mode with HUB
 *                      N-way force capability with SWITCH
 */

static void uli526x_process_mode(struct uli526x_board_info *db)
{
        u16 phy_reg;

        /* Full Duplex Mode Check */
        if (db->op_mode & 0x4)
                db->cr6_data |= CR6_FDM;        /* Set Full Duplex Bit */
        else
                db->cr6_data &= ~CR6_FDM;       /* Clear Full Duplex Bit */

        update_cr6(db->cr6_data, db->ioaddr);

        /* 10/100M phyxcer force mode need */
        if ( !(db->media_mode & 0x8)) {
                /* Force Mode */
                phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
                if ( !(phy_reg & 0x1) ) {
                        /* parter without N-Way capability: force the PHY's
                         * BMCR speed/duplex bits directly */
                        phy_reg = 0x0;
                        switch(db->op_mode) {
                        case ULI526X_10MHF: phy_reg = 0x0; break;
                        case ULI526X_10MFD: phy_reg = 0x100; break;
                        case ULI526X_100MHF: phy_reg = 0x2000; break;
                        case ULI526X_100MFD: phy_reg = 0x2100; break;
                        }
                        phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
                }
        }
}


/*
 *      Write a word to Phy register via bit-banged MII (M5261/M5263)
 */

static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
{
        u16 i;
        unsigned long ioaddr;

        if(chip_id == PCI_ULI5263_ID)
        {
                phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
                return;
        }
        /* M5261/M5263 Chip */
        ioaddr = iobase + DCR9;

        /* Send 33 synchronization clock to Phy controller */
        for (i = 0; i < 35; i++)
                phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);

        /* Send start command(01) to Phy */
        phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
        phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);

        /* Send write command(01) to Phy */
        phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
        phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);

        /* Send Phy address */
        for (i = 0x10; i > 0; i = i >> 1)
                phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);

        /* Send register address */
        for (i = 0x10; i > 0; i = i >> 1)
                phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);

        /* write transition (turnaround bits) */
        phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
        phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);

        /* Write a word data to PHY controller */
        for ( i = 0x8000; i > 0; i >>= 1)
                phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
}


/*
 *      Read a word data from phy register
 */

static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
        int i;
        u16 phy_data;
        unsigned long ioaddr;

        if(chip_id == PCI_ULI5263_ID)
                return phy_readby_cr10(iobase, phy_addr, offset);
        /* M5261/M5263 Chip */
        ioaddr = iobase + DCR9;

        /* Send 33 synchronization clock to Phy controller */
        for (i = 0; i < 35; i++)
                phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);

        /* Send start command(01) to Phy */
        phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
        phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);

        /* Send read command(10) to Phy */
        phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
        phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);

        /* Send Phy address */
        for (i = 0x10; i > 0; i = i >> 1)
                phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);

        /* Send register address */
        for (i = 0x10; i > 0; i = i >> 1)
                phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);

        /* Skip transition state */
        phy_read_1bit(ioaddr, chip_id);

        /* read 16bit data */
        for (phy_data = 0, i = 0; i < 16; i++) {
                phy_data <<= 1;
                phy_data |= phy_read_1bit(ioaddr, chip_id);
        }

        return phy_data;
}

/* M5263: MII access through the CR10 mailbox register instead of bit-banging */
static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
{
        unsigned long ioaddr,cr10_value;

        ioaddr = iobase + DCR10;
        cr10_value = phy_addr;
        cr10_value = (cr10_value<<5) + offset;
        cr10_value = (cr10_value<<16) + 0x08000000;     /* read command */
        outl(cr10_value,ioaddr);
        udelay(1);
        /* NOTE(review): unbounded busy-wait for the done bit (0x10000000);
         * a hung chip would spin forever — consider a bounded timeout. */
        while(1)
        {
                cr10_value = inl(ioaddr);
                if(cr10_value&0x10000000)
                        break;
        }
        return cr10_value & 0x0ffff;
}

static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
{
        unsigned long ioaddr,cr10_value;

        ioaddr = iobase + DCR10;
        cr10_value = phy_addr;
        cr10_value = (cr10_value<<5) + offset;
        cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;  /* write command */
        outl(cr10_value,ioaddr);
        udelay(1);
}

/*
 *      Write one bit data to Phy Controller (clock low/high/low with data held)
 */

static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
{
        outl(phy_data , ioaddr);                        /* MII Clock Low */
        udelay(1);
        outl(phy_data  | MDCLKH, ioaddr);               /* MII Clock High */
        udelay(1);
        outl(phy_data , ioaddr);                        /* MII Clock Low */
        udelay(1);
}


/*
 *      Read one bit phy data from PHY controller
 */

static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
{
        u16 phy_data;

        outl(0x50000 , ioaddr);
        udelay(1);
        phy_data = ( inl(ioaddr) >> 19 ) & 0x1;         /* sample MDIO input bit */
        outl(0x40000 , ioaddr);
        udelay(1);

        return phy_data;
}


static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
        { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
        { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);


static struct pci_driver uli526x_driver = {
        .name           = "uli526x",
        .id_table       = uli526x_pci_tbl,
        .probe          = uli526x_init_one,
        .remove         = __devexit_p(uli526x_remove_one),
        .suspend        = uli526x_suspend,
        .resume         = uli526x_resume,
};

MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0644);
module_param(mode, int, 0);
/* NOTE(review): cr6set has no MODULE_PARM_DESC — consider documenting it */
module_param(cr6set, int, 0);
MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

/*      Description:
 *      when user used insmod to add module, system invoked init_module()
 *      to register the services.
 */

static int __init uli526x_init_module(void)
{
        pr_info("%s\n", version);
        printed_version = 1;

        ULI526X_DBUG(0, "init_module() ", debug);

        if (debug)
                uli526x_debug = debug;  /* set debug flag */
        if (cr6set)
                uli526x_cr6_user_set = cr6set;

        /* only the four explicit speed/duplex values force a media mode;
         * anything else (default 8) means auto-negotiate */
        switch (mode) {
        case ULI526X_10MHF:
        case ULI526X_100MHF:
        case ULI526X_10MFD:
        case ULI526X_100MFD:
                uli526x_media_mode = mode;
                break;
        default:
                uli526x_media_mode = ULI526X_AUTO;
                break;
        }

        return pci_register_driver(&uli526x_driver);
}


/*
 *      Description:
 *      when user used rmmod to delete module, system invoked clean_module()
 *      to un-register all registered services.
 */

static void __exit uli526x_cleanup_module(void)
{
        ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
        pci_unregister_driver(&uli526x_driver);
}

module_init(uli526x_init_module);
module_exit(uli526x_cleanup_module);
gpl-2.0
EnJens/android_kernel_sony_pollux_windy_stock
lib/swiotlb.c
4805
26277
/* * Dynamic DMA mapping support. * * This implementation is a fallback for platforms that do not support * I/O TLBs (aka DMA address translation hardware). * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> * Copyright (C) 2000, 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid * unnecessary i-cache flushing. * 04/07/.. ak Better overflow handling. Assorted fixes. * 05/09/10 linville Add support for syncing ranges, support syncing for * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. * 08/12/11 beckyb Add highmem support */ #include <linux/cache.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/swiotlb.h> #include <linux/pfn.h> #include <linux/types.h> #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/scatterlist.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/iommu-helper.h> #define OFFSET(val,align) ((unsigned long) \ ( (val) & ( (align) - 1))) #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) /* * Minimum IO TLB size to bother booting with. Systems with mainly * 64bit capable cards will only lightly use the swiotlb. If we can't * allocate a contiguous 1MB, we're probably in trouble anyway. */ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) int swiotlb_force; /* * Used to do a quick range check in swiotlb_tbl_unmap_single and * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this * API. */ static char *io_tlb_start, *io_tlb_end; /* * The number of IO TLB blocks (in groups of 64) between io_tlb_start and * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. 
*/ static unsigned long io_tlb_nslabs; /* * When the IOMMU overflows we return a fallback buffer. This sets the size. */ static unsigned long io_tlb_overflow = 32*1024; static void *io_tlb_overflow_buffer; /* * This is a free list describing the number of free entries available from * each index */ static unsigned int *io_tlb_list; static unsigned int io_tlb_index; /* * We need to save away the original address corresponding to a mapped entry * for the sync operations. */ static phys_addr_t *io_tlb_orig_addr; /* * Protect the above data structures in the map and unmap calls */ static DEFINE_SPINLOCK(io_tlb_lock); static int late_alloc; static int __init setup_io_tlb_npages(char *str) { if (isdigit(*str)) { io_tlb_nslabs = simple_strtoul(str, &str, 0); /* avoid tail segment of size < IO_TLB_SEGSIZE */ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } if (*str == ',') ++str; if (!strcmp(str, "force")) swiotlb_force = 1; return 1; } __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) { return io_tlb_nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) { return phys_to_dma(hwdev, virt_to_phys(address)); } void swiotlb_print_info(void) { unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; phys_addr_t pstart, pend; pstart = virt_to_phys(io_tlb_start); pend = virt_to_phys(io_tlb_end); printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", bytes >> 20, io_tlb_start, io_tlb_end); printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", (unsigned long long)pstart, (unsigned long long)pend); } void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { unsigned long i, bytes; bytes = nslabs << IO_TLB_SHIFT; io_tlb_nslabs = nslabs; io_tlb_start = tlb; io_tlb_end = io_tlb_start + bytes; /* * Allocate and initialize the free list array. 
This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE * between io_tlb_start and io_tlb_end. */ io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); for (i = 0; i < io_tlb_nslabs; i++) io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); io_tlb_index = 0; io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); /* * Get the overflow emergency buffer */ io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); if (!io_tlb_overflow_buffer) panic("Cannot allocate SWIOTLB overflow buffer!\n"); if (verbose) swiotlb_print_info(); } /* * Statically reserve bounce buffer space and initialize bounce buffer data * structures for the software IO TLB used to implement the DMA API. */ void __init swiotlb_init_with_default_size(size_t default_size, int verbose) { unsigned long bytes; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } bytes = io_tlb_nslabs << IO_TLB_SHIFT; /* * Get IO TLB memory from the low pages */ io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); if (!io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); } void __init swiotlb_init(int verbose) { swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ } /* * Systems with larger DMA zones (those that don't support ISA) can * initialize the swiotlb later using the slab allocator if needed. * This should be just like above, but with some error catching. 
*/ int swiotlb_late_init_with_default_size(size_t default_size) { unsigned long i, bytes, req_nslabs = io_tlb_nslabs; unsigned int order; if (!io_tlb_nslabs) { io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); } /* * Get IO TLB memory from the low pages */ order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); io_tlb_nslabs = SLABS_PER_PAGE << order; bytes = io_tlb_nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); if (io_tlb_start) break; order--; } if (!io_tlb_start) goto cleanup1; if (order != get_order(bytes)) { printk(KERN_WARNING "Warning: only able to allocate %ld MB " "for software IO TLB\n", (PAGE_SIZE << order) >> 20); io_tlb_nslabs = SLABS_PER_PAGE << order; bytes = io_tlb_nslabs << IO_TLB_SHIFT; } io_tlb_end = io_tlb_start + bytes; memset(io_tlb_start, 0, bytes); /* * Allocate and initialize the free list array. This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE * between io_tlb_start and io_tlb_end. 
*/ io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, get_order(io_tlb_nslabs * sizeof(int))); if (!io_tlb_list) goto cleanup2; for (i = 0; i < io_tlb_nslabs; i++) io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); io_tlb_index = 0; io_tlb_orig_addr = (phys_addr_t *) __get_free_pages(GFP_KERNEL, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); if (!io_tlb_orig_addr) goto cleanup3; memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); /* * Get the overflow emergency buffer */ io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, get_order(io_tlb_overflow)); if (!io_tlb_overflow_buffer) goto cleanup4; swiotlb_print_info(); late_alloc = 1; return 0; cleanup4: free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); io_tlb_orig_addr = NULL; cleanup3: free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * sizeof(int))); io_tlb_list = NULL; cleanup2: io_tlb_end = NULL; free_pages((unsigned long)io_tlb_start, order); io_tlb_start = NULL; cleanup1: io_tlb_nslabs = req_nslabs; return -ENOMEM; } void __init swiotlb_free(void) { if (!io_tlb_overflow_buffer) return; if (late_alloc) { free_pages((unsigned long)io_tlb_overflow_buffer, get_order(io_tlb_overflow)); free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * sizeof(int))); free_pages((unsigned long)io_tlb_start, get_order(io_tlb_nslabs << IO_TLB_SHIFT)); } else { free_bootmem_late(__pa(io_tlb_overflow_buffer), PAGE_ALIGN(io_tlb_overflow)); free_bootmem_late(__pa(io_tlb_orig_addr), PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); free_bootmem_late(__pa(io_tlb_list), PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); free_bootmem_late(__pa(io_tlb_start), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); } io_tlb_nslabs = 0; } static int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= virt_to_phys(io_tlb_start) && paddr < virt_to_phys(io_tlb_end); } 
/* * Bounce: copy the swiotlb buffer back to the original dma location */ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, enum dma_data_direction dir) { unsigned long pfn = PFN_DOWN(phys); if (PageHighMem(pfn_to_page(pfn))) { /* The buffer does not have a mapping. Map it in and copy */ unsigned int offset = phys & ~PAGE_MASK; char *buffer; unsigned int sz = 0; unsigned long flags; while (size) { sz = min_t(size_t, PAGE_SIZE - offset, size); local_irq_save(flags); buffer = kmap_atomic(pfn_to_page(pfn)); if (dir == DMA_TO_DEVICE) memcpy(dma_addr, buffer + offset, sz); else memcpy(buffer + offset, dma_addr, sz); kunmap_atomic(buffer); local_irq_restore(flags); size -= sz; pfn++; dma_addr += sz; offset = 0; } } else { if (dir == DMA_TO_DEVICE) memcpy(dma_addr, phys_to_virt(phys), size); else memcpy(phys_to_virt(phys), dma_addr, size); } } EXPORT_SYMBOL_GPL(swiotlb_bounce); void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, phys_addr_t phys, size_t size, enum dma_data_direction dir) { unsigned long flags; char *dma_addr; unsigned int nslots, stride, index, wrap; int i; unsigned long mask; unsigned long offset_slots; unsigned long max_slots; mask = dma_get_seg_boundary(hwdev); tbl_dma_addr &= mask; offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; /* * Carefully handle integer overflow which can occur when mask == ~0UL. */ max_slots = mask + 1 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); /* * For mappings greater than a page, we limit the stride (and * hence alignment) to a page size. */ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; if (size > PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; BUG_ON(!nslots); /* * Find suitable number of IO TLB entries size that will fit this * request and allocate a buffer from that IO TLB pool. 
*/ spin_lock_irqsave(&io_tlb_lock, flags); index = ALIGN(io_tlb_index, stride); if (index >= io_tlb_nslabs) index = 0; wrap = index; do { while (iommu_is_span_boundary(index, nslots, offset_slots, max_slots)) { index += stride; if (index >= io_tlb_nslabs) index = 0; if (index == wrap) goto not_found; } /* * If we find a slot that indicates we have 'nslots' number of * contiguous buffers, we allocate the buffers from that slot * and mark the entries as '0' indicating unavailable. */ if (io_tlb_list[index] >= nslots) { int count = 0; for (i = index; i < (int) (index + nslots); i++) io_tlb_list[i] = 0; for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) io_tlb_list[i] = ++count; dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); /* * Update the indices to avoid searching in the next * round. */ io_tlb_index = ((index + nslots) < io_tlb_nslabs ? (index + nslots) : 0); goto found; } index += stride; if (index >= io_tlb_nslabs) index = 0; } while (index != wrap); not_found: spin_unlock_irqrestore(&io_tlb_lock, flags); return NULL; found: spin_unlock_irqrestore(&io_tlb_lock, flags); /* * Save away the mapping from the original address to the DMA address. * This is needed when we sync the memory. Then we sync the buffer if * needed. */ for (i = 0; i < nslots; i++) io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT); if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE); return dma_addr; } EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); /* * Allocates bounce buffer and returns its kernel virtual address. */ static void * map_single(struct device *hwdev, phys_addr_t phys, size_t size, enum dma_data_direction dir) { dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start); return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); } /* * dma_addr is the kernel virtual address of the bounce buffer to unmap. 
*/ void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, enum dma_data_direction dir) { unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t phys = io_tlb_orig_addr[index]; /* * First, sync the memory before unmapping the entry */ if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE); /* * Return the buffer to the free list by setting the corresponding * entries to indicate the number of contiguous entries available. * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. */ spin_lock_irqsave(&io_tlb_lock, flags); { count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? io_tlb_list[index + nslots] : 0); /* * Step 1: return the slots to the free list, merging the * slots with superceeding slots */ for (i = index + nslots - 1; i >= index; i--) io_tlb_list[i] = ++count; /* * Step 2: merge the returned slots with the preceding slots, * if available (non zero) */ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) io_tlb_list[i] = ++count; } spin_unlock_irqrestore(&io_tlb_lock, flags); } EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, enum dma_data_direction dir, enum dma_sync_target target) { int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t phys = io_tlb_orig_addr[index]; phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1)); switch (target) { case SYNC_FOR_CPU: if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE); else BUG_ON(dir != DMA_TO_DEVICE); break; case SYNC_FOR_DEVICE: if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE); 
else BUG_ON(dir != DMA_FROM_DEVICE); break; default: BUG(); } } EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { dma_addr_t dev_addr; void *ret; int order = get_order(size); u64 dma_mask = DMA_BIT_MASK(32); if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { /* * The allocated memory isn't reachable by the device. */ free_pages((unsigned long) ret, order); ret = NULL; } if (!ret) { /* * We are either out of memory or the device can't DMA to * GFP_DMA memory; fall back on map_single(), which * will grab memory from the lowest available address range. */ ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); if (!ret) return NULL; } memset(ret, 0, size); dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ if (dev_addr + size - 1 > dma_mask) { printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", (unsigned long long)dma_mask, (unsigned long long)dev_addr); /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); return NULL; } *dma_handle = dev_addr; return ret; } EXPORT_SYMBOL(swiotlb_alloc_coherent); void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); WARN_ON(irqs_disabled()); if (!is_swiotlb_buffer(paddr)) free_pages((unsigned long)vaddr, get_order(size)); else /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */ swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); } EXPORT_SYMBOL(swiotlb_free_coherent); static void swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, int do_panic) { /* * Ran out of IOMMU space for this operation. This is very bad. 
* Unfortunately the drivers cannot handle this operation properly. * unless they check for dma_mapping_error (most don't) * When the mapping is small enough return a static buffer to limit * the damage, or panic when the transfer is too big. */ printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " "device %s\n", size, dev ? dev_name(dev) : "?"); if (size <= io_tlb_overflow || !do_panic) return; if (dir == DMA_BIDIRECTIONAL) panic("DMA: Random memory could be DMA accessed\n"); if (dir == DMA_FROM_DEVICE) panic("DMA: Random memory could be DMA written\n"); if (dir == DMA_TO_DEVICE) panic("DMA: Random memory could be DMA read\n"); } /* * Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. * * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed. */ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dev_addr = phys_to_dma(dev, phys); void *map; BUG_ON(dir == DMA_NONE); /* * If the address happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce * buffering it. */ if (dma_capable(dev, dev_addr, size) && !swiotlb_force) return dev_addr; /* * Oh well, have to allocate and map a bounce buffer. */ map = map_single(dev, phys, size, dir); if (!map) { swiotlb_full(dev, size, dir, 1); map = io_tlb_overflow_buffer; } dev_addr = swiotlb_virt_to_bus(dev, map); /* * Ensure that the address returned is DMA'ble */ if (!dma_capable(dev, dev_addr, size)) { swiotlb_tbl_unmap_single(dev, map, size, dir); dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); } return dev_addr; } EXPORT_SYMBOL_GPL(swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. 
The dma_addr and size must * match what was provided for in a previous swiotlb_map_page call. All * other usages are undefined. * * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(paddr)) { swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir); return; } if (dir != DMA_FROM_DEVICE) return; /* * phys_to_virt doesn't work with hihgmem page but we could * call dma_mark_clean() with hihgmem page here. However, we * are fine since dma_mark_clean() is null on POWERPC. We can * make dma_mark_clean() take a physical address if necessary. */ dma_mark_clean(phys_to_virt(paddr), size); } void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { unmap_single(hwdev, dev_addr, size, dir); } EXPORT_SYMBOL_GPL(swiotlb_unmap_page); /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. * * If you perform a swiotlb_map_page() but wish to interrogate the buffer * using the cpu, yet do not wish to teardown the dma mapping, you must * call this function before doing so. 
At the next point you give the dma * address back to the card, you must first perform a * swiotlb_dma_sync_for_device, and then the device again owns the buffer */ static void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, enum dma_sync_target target) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); if (is_swiotlb_buffer(paddr)) { swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir, target); return; } if (dir != DMA_FROM_DEVICE) return; dma_mark_clean(phys_to_virt(paddr), size); } void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL(swiotlb_sync_single_for_device); /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_page * interface. Here the scatter gather list elements are each tagged with the * appropriate dma address and length. They are obtained via * sg_dma_{address,length}(SG). * * NOTE: An implementation may be able to use a smaller number of * DMA address/length pairs than there are SG table elements. * (for example via virtual mapping capabilities) * The routine returns the number of addr/length pairs actually * used, at most nents. * * Device ownership issues as mentioned above for swiotlb_map_page are the * same here. 
*/ int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { phys_addr_t paddr = sg_phys(sg); dma_addr_t dev_addr = phys_to_dma(hwdev, paddr); if (swiotlb_force || !dma_capable(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); if (!map) { /* Don't panic here, we expect map_sg users to do proper error handling. */ swiotlb_full(hwdev, sg->length, dir, 0); swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, attrs); sgl[0].dma_length = 0; return 0; } sg->dma_address = swiotlb_virt_to_bus(hwdev, map); } else sg->dma_address = dev_addr; sg->dma_length = sg->length; } return nelems; } EXPORT_SYMBOL(swiotlb_map_sg_attrs); int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); } EXPORT_SYMBOL(swiotlb_map_sg); /* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_page() above. */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) unmap_single(hwdev, sg->dma_address, sg->dma_length, dir); } EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir) { return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); } EXPORT_SYMBOL(swiotlb_unmap_sg); /* * Make physical memory consistent for a set of streaming mode DMA translations * after a transfer. * * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules * and usage. 
*/ static void swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, enum dma_sync_target target) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nelems, i) swiotlb_sync_single(hwdev, sg->dma_address, sg->dma_length, dir, target); } void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } EXPORT_SYMBOL(swiotlb_sync_sg_for_device); int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer)); } EXPORT_SYMBOL(swiotlb_dma_mapping_error); /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during bus mastering, then you would pass 0x00ffffff as the mask to * this function. */ int swiotlb_dma_supported(struct device *hwdev, u64 mask) { return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask; } EXPORT_SYMBOL(swiotlb_dma_supported);
gpl-2.0
Root-Box/kernel_samsung_smdk4412
arch/x86/kernel/cpu/mtrr/centaur.c
13253
3027
#include <linux/init.h> #include <linux/mm.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" static struct { unsigned long high; unsigned long low; } centaur_mcr[8]; static u8 centaur_mcr_reserved; static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ /** * centaur_get_free_region - Get a free MTRR. * * @base: The starting (base) address of the region. * @size: The size (in bytes) of the region. * * Returns: the index of the region on success, else -1 on error. */ static int centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i, max; max = num_var_ranges; if (replace_reg >= 0 && replace_reg < max) return replace_reg; for (i = 0; i < max; ++i) { if (centaur_mcr_reserved & (1 << i)) continue; mtrr_if->get(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } return -ENOSPC; } /* * Report boot time MCR setups */ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) { centaur_mcr[mcr].low = lo; centaur_mcr[mcr].high = hi; } static void centaur_get_mcr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type * type) { *base = centaur_mcr[reg].high >> PAGE_SHIFT; *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; *type = MTRR_TYPE_WRCOMB; /* write-combining */ if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) *type = MTRR_TYPE_UNCACHABLE; if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) *type = MTRR_TYPE_WRBACK; if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) *type = MTRR_TYPE_WRBACK; } static void centaur_set_mcr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned long low, high; if (size == 0) { /* Disable */ high = low = 0; } else { high = base << PAGE_SHIFT; if (centaur_mcr_type == 0) { /* Only support write-combining... 
*/ low = -size << PAGE_SHIFT | 0x1f; } else { if (type == MTRR_TYPE_UNCACHABLE) low = -size << PAGE_SHIFT | 0x02; /* NC */ else low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ } } centaur_mcr[reg].high = high; centaur_mcr[reg].low = low; wrmsr(MSR_IDT_MCR0 + reg, low, high); } static int centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) { /* * FIXME: Winchip2 supports uncached */ if (type != MTRR_TYPE_WRCOMB && (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { pr_warning("mtrr: only write-combining%s supported\n", centaur_mcr_type ? " and uncacheable are" : " is"); return -EINVAL; } return 0; } static const struct mtrr_ops centaur_mtrr_ops = { .vendor = X86_VENDOR_CENTAUR, .set = centaur_set_mcr, .get = centaur_get_mcr, .get_free_region = centaur_get_free_region, .validate_add_page = centaur_validate_add_page, .have_wrcomb = positive_have_wrcomb, }; int __init centaur_init_mtrr(void) { set_mtrr_ops(&centaur_mtrr_ops); return 0; }
gpl-2.0
Chairshot215/starship_kernel_moto_shamu
net/llc/llc_pdu.c
15045
10888
/* * llc_pdu.c - access to PDU internals * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/netdevice.h> #include <net/llc_pdu.h> static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type); static u8 llc_pdu_get_pf_bit(struct llc_pdu_sn *pdu); void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type) { llc_pdu_un_hdr(skb)->ssap |= pdu_type; } /** * pdu_set_pf_bit - sets poll/final bit in LLC header * @pdu_frame: input frame that p/f bit must be set into it. * @bit_value: poll/final bit (0 or 1). * * This function sets poll/final bit in LLC header (based on type of PDU). * in I or S pdus, p/f bit is right bit of fourth byte in header. in U * pdus p/f bit is fifth bit of third byte. */ void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value) { u8 pdu_type; struct llc_pdu_sn *pdu; llc_pdu_decode_pdu_type(skb, &pdu_type); pdu = llc_pdu_sn_hdr(skb); switch (pdu_type) { case LLC_PDU_TYPE_I: case LLC_PDU_TYPE_S: pdu->ctrl_2 = (pdu->ctrl_2 & 0xFE) | bit_value; break; case LLC_PDU_TYPE_U: pdu->ctrl_1 |= (pdu->ctrl_1 & 0xEF) | (bit_value << 4); break; } } /** * llc_pdu_decode_pf_bit - extracs poll/final bit from LLC header * @skb: input skb that p/f bit must be extracted from it * @pf_bit: poll/final bit (0 or 1) * * This function extracts poll/final bit from LLC header (based on type of * PDU). In I or S pdus, p/f bit is right bit of fourth byte in header. In * U pdus p/f bit is fifth bit of third byte. 
*/ void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit) { u8 pdu_type; struct llc_pdu_sn *pdu; llc_pdu_decode_pdu_type(skb, &pdu_type); pdu = llc_pdu_sn_hdr(skb); switch (pdu_type) { case LLC_PDU_TYPE_I: case LLC_PDU_TYPE_S: *pf_bit = pdu->ctrl_2 & LLC_S_PF_BIT_MASK; break; case LLC_PDU_TYPE_U: *pf_bit = (pdu->ctrl_1 & LLC_U_PF_BIT_MASK) >> 4; break; } } /** * llc_pdu_init_as_disc_cmd - Builds DISC PDU * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * * Builds a pdu frame as a DISC command. */ void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_U; pdu->ctrl_1 |= LLC_2_PDU_CMD_DISC; pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK; } /** * llc_pdu_init_as_i_cmd - builds I pdu * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * @ns: The sequence number of the data PDU * @nr: The seq. number of the expected I PDU from the remote * * Builds a pdu frame as an I command. */ void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_I; pdu->ctrl_2 = 0; pdu->ctrl_2 |= (p_bit & LLC_I_PF_BIT_MASK); /* p/f bit */ pdu->ctrl_1 |= (ns << 1) & 0xFE; /* set N(S) in bits 2..8 */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_rej_cmd - builds REJ PDU * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * @nr: The seq. number of the expected I PDU from the remote * * Builds a pdu frame as a REJ command. 
*/ void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_CMD_REJ; pdu->ctrl_2 = 0; pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_rnr_cmd - builds RNR pdu * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * @nr: The seq. number of the expected I PDU from the remote * * Builds a pdu frame as an RNR command. */ void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_CMD_RNR; pdu->ctrl_2 = 0; pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_rr_cmd - Builds RR pdu * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * @nr: The seq. number of the expected I PDU from the remote * * Builds a pdu frame as an RR command. */ void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_CMD_RR; pdu->ctrl_2 = p_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_sabme_cmd - builds SABME pdu * @skb: Address of the skb to build * @p_bit: The P bit to set in the PDU * * Builds a pdu frame as an SABME command. 
*/ void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_U; pdu->ctrl_1 |= LLC_2_PDU_CMD_SABME; pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK; } /** * llc_pdu_init_as_dm_rsp - builds DM response pdu * @skb: Address of the skb to build * @f_bit: The F bit to set in the PDU * * Builds a pdu frame as a DM response. */ void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_U; pdu->ctrl_1 |= LLC_2_PDU_RSP_DM; pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; } /** * llc_pdu_init_as_frmr_rsp - builds FRMR response PDU * @skb: Address of the frame to build * @prev_pdu: The rejected PDU frame * @f_bit: The F bit to set in the PDU * @vs: tx state vari value for the data link conn at the rejecting LLC * @vr: rx state var value for the data link conn at the rejecting LLC * @vzyxw: completely described in the IEEE Std 802.2 document (Pg 55) * * Builds a pdu frame as a FRMR response. 
*/ void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu, u8 f_bit, u8 vs, u8 vr, u8 vzyxw) { struct llc_frmr_info *frmr_info; u8 prev_pf = 0; u8 *ctrl; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_U; pdu->ctrl_1 |= LLC_2_PDU_RSP_FRMR; pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; frmr_info = (struct llc_frmr_info *)&pdu->ctrl_2; ctrl = (u8 *)&prev_pdu->ctrl_1; FRMR_INFO_SET_REJ_CNTRL(frmr_info,ctrl); FRMR_INFO_SET_Vs(frmr_info, vs); FRMR_INFO_SET_Vr(frmr_info, vr); prev_pf = llc_pdu_get_pf_bit(prev_pdu); FRMR_INFO_SET_C_R_BIT(frmr_info, prev_pf); FRMR_INFO_SET_INVALID_PDU_CTRL_IND(frmr_info, vzyxw); FRMR_INFO_SET_INVALID_PDU_INFO_IND(frmr_info, vzyxw); FRMR_INFO_SET_PDU_INFO_2LONG_IND(frmr_info, vzyxw); FRMR_INFO_SET_PDU_INVALID_Nr_IND(frmr_info, vzyxw); FRMR_INFO_SET_PDU_INVALID_Ns_IND(frmr_info, vzyxw); skb_put(skb, sizeof(struct llc_frmr_info)); } /** * llc_pdu_init_as_rr_rsp - builds RR response pdu * @skb: Address of the skb to build * @f_bit: The F bit to set in the PDU * @nr: The seq. number of the expected data PDU from the remote * * Builds a pdu frame as an RR response. */ void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_RSP_RR; pdu->ctrl_2 = 0; pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_rej_rsp - builds REJ response pdu * @skb: Address of the skb to build * @f_bit: The F bit to set in the PDU * @nr: The seq. number of the expected data PDU from the remote * * Builds a pdu frame as a REJ response. 
*/ void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_RSP_REJ; pdu->ctrl_2 = 0; pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_rnr_rsp - builds RNR response pdu * @skb: Address of the frame to build * @f_bit: The F bit to set in the PDU * @nr: The seq. number of the expected data PDU from the remote * * Builds a pdu frame as an RNR response. */ void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_S; pdu->ctrl_1 |= LLC_2_PDU_RSP_RNR; pdu->ctrl_2 = 0; pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK; pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */ pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ } /** * llc_pdu_init_as_ua_rsp - builds UA response pdu * @skb: Address of the frame to build * @f_bit: The F bit to set in the PDU * * Builds a pdu frame as a UA response. */ void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); pdu->ctrl_1 = LLC_PDU_TYPE_U; pdu->ctrl_1 |= LLC_2_PDU_RSP_UA; pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK; } /** * llc_pdu_decode_pdu_type - designates PDU type * @skb: input skb that type of it must be designated. * @type: type of PDU (output argument). * * This function designates type of PDU (I, S or U). */ static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (pdu->ctrl_1 & 1) { if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U) *type = LLC_PDU_TYPE_U; else *type = LLC_PDU_TYPE_S; } else *type = LLC_PDU_TYPE_I; } /** * llc_pdu_get_pf_bit - extracts p/f bit of input PDU * @pdu: pointer to LLC header. * * This function extracts p/f bit of input PDU. 
at first examines type of * PDU and then extracts p/f bit. Returns the p/f bit. */ static u8 llc_pdu_get_pf_bit(struct llc_pdu_sn *pdu) { u8 pdu_type; u8 pf_bit = 0; if (pdu->ctrl_1 & 1) { if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U) pdu_type = LLC_PDU_TYPE_U; else pdu_type = LLC_PDU_TYPE_S; } else pdu_type = LLC_PDU_TYPE_I; switch (pdu_type) { case LLC_PDU_TYPE_I: case LLC_PDU_TYPE_S: pf_bit = pdu->ctrl_2 & LLC_S_PF_BIT_MASK; break; case LLC_PDU_TYPE_U: pf_bit = (pdu->ctrl_1 & LLC_U_PF_BIT_MASK) >> 4; break; } return pf_bit; }
gpl-2.0
val2k/linux
sound/soc/tegra/trimslice.c
198
5380
/* * trimslice.c - TrimSlice machine ASoC driver * * Copyright (C) 2011 - CompuLab, Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * Based on code copyright/by: * Author: Stephen Warren <swarren@nvidia.com> * Copyright (C) 2010-2011 - NVIDIA, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "../codecs/tlv320aic23.h" #include "tegra_asoc_utils.h" #define DRV_NAME "tegra-snd-trimslice" struct tegra_trimslice { struct tegra_asoc_utils_data util_data; }; static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_card *card = rtd->card; struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card); int srate, mclk; int err; srate = params_rate(params); mclk = 128 * srate; err = tegra_asoc_utils_set_rate(&trimslice->util_data, srate, mclk); if (err < 0) { dev_err(card->dev, "Can't configure clocks\n"); return err; } err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, SND_SOC_CLOCK_IN); if (err < 0) { dev_err(card->dev, "codec_dai clock not set\n"); return err; } return 0; 
} static const struct snd_soc_ops trimslice_asoc_ops = { .hw_params = trimslice_asoc_hw_params, }; static const struct snd_soc_dapm_widget trimslice_dapm_widgets[] = { SND_SOC_DAPM_HP("Line Out", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static const struct snd_soc_dapm_route trimslice_audio_map[] = { {"Line Out", NULL, "LOUT"}, {"Line Out", NULL, "ROUT"}, {"LLINEIN", NULL, "Line In"}, {"RLINEIN", NULL, "Line In"}, }; static struct snd_soc_dai_link trimslice_tlv320aic23_dai = { .name = "TLV320AIC23", .stream_name = "AIC23", .codec_dai_name = "tlv320aic23-hifi", .ops = &trimslice_asoc_ops, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, }; static struct snd_soc_card snd_soc_trimslice = { .name = "tegra-trimslice", .owner = THIS_MODULE, .dai_link = &trimslice_tlv320aic23_dai, .num_links = 1, .dapm_widgets = trimslice_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(trimslice_dapm_widgets), .dapm_routes = trimslice_audio_map, .num_dapm_routes = ARRAY_SIZE(trimslice_audio_map), .fully_routed = true, }; static int tegra_snd_trimslice_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct snd_soc_card *card = &snd_soc_trimslice; struct tegra_trimslice *trimslice; int ret; trimslice = devm_kzalloc(&pdev->dev, sizeof(struct tegra_trimslice), GFP_KERNEL); if (!trimslice) return -ENOMEM; card->dev = &pdev->dev; snd_soc_card_set_drvdata(card, trimslice); trimslice_tlv320aic23_dai.codec_of_node = of_parse_phandle(np, "nvidia,audio-codec", 0); if (!trimslice_tlv320aic23_dai.codec_of_node) { dev_err(&pdev->dev, "Property 'nvidia,audio-codec' missing or invalid\n"); ret = -EINVAL; goto err; } trimslice_tlv320aic23_dai.cpu_of_node = of_parse_phandle(np, "nvidia,i2s-controller", 0); if (!trimslice_tlv320aic23_dai.cpu_of_node) { dev_err(&pdev->dev, "Property 'nvidia,i2s-controller' missing or invalid\n"); ret = -EINVAL; goto err; } trimslice_tlv320aic23_dai.platform_of_node = trimslice_tlv320aic23_dai.cpu_of_node; ret 
= tegra_asoc_utils_init(&trimslice->util_data, &pdev->dev); if (ret) goto err; ret = snd_soc_register_card(card); if (ret) { dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); goto err_fini_utils; } return 0; err_fini_utils: tegra_asoc_utils_fini(&trimslice->util_data); err: return ret; } static int tegra_snd_trimslice_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card); snd_soc_unregister_card(card); tegra_asoc_utils_fini(&trimslice->util_data); return 0; } static const struct of_device_id trimslice_of_match[] = { { .compatible = "nvidia,tegra-audio-trimslice", }, {}, }; MODULE_DEVICE_TABLE(of, trimslice_of_match); static struct platform_driver tegra_snd_trimslice_driver = { .driver = { .name = DRV_NAME, .of_match_table = trimslice_of_match, }, .probe = tegra_snd_trimslice_probe, .remove = tegra_snd_trimslice_remove, }; module_platform_driver(tegra_snd_trimslice_driver); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("Trimslice machine ASoC driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
argakon/android_kernel_swift
drivers/usb/storage/sierra_ms.c
454
5501
#include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/usb.h> #include <linux/slab.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "scsiglue.h" #include "sierra_ms.h" #include "debug.h" #define SWIMS_USB_REQUEST_SetSwocMode 0x0B #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A #define SWIMS_USB_INDEX_SetMode 0x0000 #define SWIMS_SET_MODE_Modem 0x0001 #define TRU_NORMAL 0x01 #define TRU_FORCE_MS 0x02 #define TRU_FORCE_MODEM 0x03 static unsigned int swi_tru_install = 1; module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def)," " 2=Force CD-Rom, 3=Force Modem)"); struct swoc_info { __u8 rev; __u8 reserved[8]; __u16 LinuxSKU; __u16 LinuxVer; __u8 reserved2[47]; } __attribute__((__packed__)); static bool containsFullLinuxPackage(struct swoc_info *swocInfo) { if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) || (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF)) return true; else return false; } static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) { int result; US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n"); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */ USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */ eSWocMode, /* __u16 value */ 0x0000, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_get_swoc_info(struct usb_device *udev, struct swoc_info *swocInfo) { int result; US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n"); result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */ USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */ 0, /* __u16 value */ 0, /* __u16 index */ (void *) swocInfo, /* void *data */ sizeof(struct swoc_info), /* __u16 size */ 
USB_CTRL_SET_TIMEOUT); /* int timeout */ swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU); swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer); return result; } static void debug_swoc(struct swoc_info *swocInfo) { US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev); US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU); US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer); } static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, char *buf) { struct swoc_info *swocInfo; struct usb_interface *intf = to_usb_interface(dev); struct usb_device *udev = interface_to_usbdev(intf); int result; if (swi_tru_install == TRU_FORCE_MS) { result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n"); } else { swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); if (!swocInfo) { US_DEBUGP("SWIMS: Allocation failure\n"); snprintf(buf, PAGE_SIZE, "Error\n"); return -ENOMEM; } result = sierra_get_swoc_info(udev, swocInfo); if (result < 0) { US_DEBUGP("SWIMS: failed SWoC query\n"); kfree(swocInfo); snprintf(buf, PAGE_SIZE, "Error\n"); return -EIO; } debug_swoc(swocInfo); result = snprintf(buf, PAGE_SIZE, "REV=%02d SKU=%04X VER=%04X\n", swocInfo->rev, swocInfo->LinuxSKU, swocInfo->LinuxVer); kfree(swocInfo); } return result; } static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); int sierra_ms_init(struct us_data *us) { int result, retries; signed long delay_t; struct swoc_info *swocInfo; struct usb_device *udev; struct Scsi_Host *sh; struct scsi_device *sd; delay_t = 2; retries = 3; result = 0; udev = us->pusb_dev; sh = us_to_host(us); sd = scsi_get_host_dev(sh); US_DEBUGP("SWIMS: sierra_ms_init called\n"); /* Force Modem mode */ if (swi_tru_install == TRU_FORCE_MODEM) { US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n"); result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); if (result < 0) US_DEBUGP("SWIMS: Failed to switch to modem mode.\n"); return -EIO; } /* Force Mass Storage mode (keep CD-Rom) */ else if (swi_tru_install == 
TRU_FORCE_MS) { US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n"); goto complete; } /* Normal TRU-Install Logic */ else { US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n"); swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); if (!swocInfo) { US_DEBUGP("SWIMS: %s", "Allocation failure\n"); return -ENOMEM; } retries = 3; do { retries--; result = sierra_get_swoc_info(udev, swocInfo); if (result < 0) { US_DEBUGP("SWIMS: %s", "Failed SWoC query\n"); schedule_timeout_uninterruptible(2*HZ); } } while (retries && result < 0); if (result < 0) { US_DEBUGP("SWIMS: %s", "Completely failed SWoC query\n"); kfree(swocInfo); return -EIO; } debug_swoc(swocInfo); /* If there is not Linux software on the TRU-Install device * then switch to modem mode */ if (!containsFullLinuxPackage(swocInfo)) { US_DEBUGP("SWIMS: %s", "Switching to Modem Mode\n"); result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); if (result < 0) US_DEBUGP("SWIMS: Failed to switch modem\n"); kfree(swocInfo); return -EIO; } kfree(swocInfo); } complete: result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); return 0; }
gpl-2.0
nightscape/yoga-900-kernel
tools/power/cpupower/bench/parse.c
1478
5378
/* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <time.h> #include <dirent.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include "parse.h" #include "config.h" /** * converts priority string to priority * * @param str string that represents a scheduler priority * * @retval priority * @retval SCHED_ERR when the priority doesn't exit **/ enum sched_prio string_to_prio(const char *str) { if (strncasecmp("high", str, strlen(str)) == 0) return SCHED_HIGH; else if (strncasecmp("default", str, strlen(str)) == 0) return SCHED_DEFAULT; else if (strncasecmp("low", str, strlen(str)) == 0) return SCHED_LOW; else return SCHED_ERR; } /** * create and open logfile * * @param dir directory in which the logfile should be created * * @retval logfile on success * @retval NULL when the file can't be created **/ FILE *prepare_output(const char *dirname) { FILE *output = NULL; int len; char *filename; struct utsname sysdata; DIR *dir; dir = opendir(dirname); if (dir == NULL) { if (mkdir(dirname, 0755)) { perror("mkdir"); fprintf(stderr, "error: Cannot create dir %s\n", dirname); return NULL; } } len = strlen(dirname) + 30; 
filename = malloc(sizeof(char) * len); if (uname(&sysdata) == 0) { len += strlen(sysdata.nodename) + strlen(sysdata.release); filename = realloc(filename, sizeof(char) * len); if (filename == NULL) { perror("realloc"); return NULL; } snprintf(filename, len - 1, "%s/benchmark_%s_%s_%li.log", dirname, sysdata.nodename, sysdata.release, time(NULL)); } else { snprintf(filename, len - 1, "%s/benchmark_%li.log", dirname, time(NULL)); } dprintf("logilename: %s\n", filename); output = fopen(filename, "w+"); if (output == NULL) { perror("fopen"); fprintf(stderr, "error: unable to open logfile\n"); } fprintf(stdout, "Logfile: %s\n", filename); free(filename); fprintf(output, "#round load sleep performance powersave percentage\n"); return output; } /** * returns the default config * * @retval default config on success * @retval NULL when the output file can't be created **/ struct config *prepare_default_config() { struct config *config = malloc(sizeof(struct config)); dprintf("loading defaults\n"); config->sleep = 500000; config->load = 500000; config->sleep_step = 500000; config->load_step = 500000; config->cycles = 5; config->rounds = 50; config->cpu = 0; config->prio = SCHED_HIGH; config->verbose = 0; strncpy(config->governor, "ondemand", 8); config->output = stdout; #ifdef DEFAULT_CONFIG_FILE if (prepare_config(DEFAULT_CONFIG_FILE, config)) return NULL; #endif return config; } /** * parses config file and returns the config to the caller * * @param path config file name * * @retval 1 on error * @retval 0 on success **/ int prepare_config(const char *path, struct config *config) { size_t len = 0; char opt[16], val[32], *line = NULL; FILE *configfile; if (config == NULL) { fprintf(stderr, "error: config is NULL\n"); return 1; } configfile = fopen(path, "r"); if (configfile == NULL) { perror("fopen"); fprintf(stderr, "error: unable to read configfile\n"); free(config); return 1; } while (getline(&line, &len, configfile) != -1) { if (line[0] == '#' || line[0] == ' ' || 
line[0] == '\n') continue; if (sscanf(line, "%14s = %30s", opt, val) < 2) continue; dprintf("parsing: %s -> %s\n", opt, val); if (strcmp("sleep", opt) == 0) sscanf(val, "%li", &config->sleep); else if (strcmp("load", opt) == 0) sscanf(val, "%li", &config->load); else if (strcmp("load_step", opt) == 0) sscanf(val, "%li", &config->load_step); else if (strcmp("sleep_step", opt) == 0) sscanf(val, "%li", &config->sleep_step); else if (strcmp("cycles", opt) == 0) sscanf(val, "%u", &config->cycles); else if (strcmp("rounds", opt) == 0) sscanf(val, "%u", &config->rounds); else if (strcmp("verbose", opt) == 0) sscanf(val, "%u", &config->verbose); else if (strcmp("output", opt) == 0) config->output = prepare_output(val); else if (strcmp("cpu", opt) == 0) sscanf(val, "%u", &config->cpu); else if (strcmp("governor", opt) == 0) { strncpy(config->governor, val, sizeof(config->governor)); config->governor[sizeof(config->governor) - 1] = '\0'; } else if (strcmp("priority", opt) == 0) { if (string_to_prio(val) != SCHED_ERR) config->prio = string_to_prio(val); } } free(line); return 0; }
gpl-2.0
Silentlys/android_kernel_xiaomi_redmi2
drivers/input/misc/ixp4xx-beeper.c
2246
3975
/* * Generic IXP4xx beeper driver * * Copyright (C) 2005 Tower Technologies * * based on nslu2-io.c * Copyright (C) 2004 Karen Spearel * * Author: Alessandro Zummo <a.zummo@towertech.it> * Maintainers: http://www.nslu2-linux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <mach/hardware.h> MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); MODULE_DESCRIPTION("ixp4xx beeper driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ixp4xx-beeper"); static DEFINE_SPINLOCK(beep_lock); static void ixp4xx_spkr_control(unsigned int pin, unsigned int count) { unsigned long flags; spin_lock_irqsave(&beep_lock, flags); if (count) { gpio_line_config(pin, IXP4XX_GPIO_OUT); gpio_line_set(pin, IXP4XX_GPIO_LOW); *IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE; } else { gpio_line_config(pin, IXP4XX_GPIO_IN); gpio_line_set(pin, IXP4XX_GPIO_HIGH); *IXP4XX_OSRT2 = 0; } spin_unlock_irqrestore(&beep_lock, flags); } static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { unsigned int pin = (unsigned int) input_get_drvdata(dev); unsigned int count = 0; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1; ixp4xx_spkr_control(pin, count); return 0; } static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id) { /* clear interrupt */ *IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND; /* flip the beeper output */ *IXP4XX_GPIO_GPOUTR ^= (1 << (unsigned int) dev_id); return IRQ_HANDLED; } static int ixp4xx_spkr_probe(struct platform_device *dev) { struct input_dev *input_dev; int 
err; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_set_drvdata(input_dev, (void *) dev->id); input_dev->name = "ixp4xx beeper", input_dev->phys = "ixp4xx/gpio"; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x001f; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &dev->dev; input_dev->evbit[0] = BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); input_dev->event = ixp4xx_spkr_event; err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt, IRQF_NO_SUSPEND, "ixp4xx-beeper", (void *) dev->id); if (err) goto err_free_device; err = input_register_device(input_dev); if (err) goto err_free_irq; platform_set_drvdata(dev, input_dev); return 0; err_free_irq: free_irq(IRQ_IXP4XX_TIMER2, dev); err_free_device: input_free_device(input_dev); return err; } static int ixp4xx_spkr_remove(struct platform_device *dev) { struct input_dev *input_dev = platform_get_drvdata(dev); unsigned int pin = (unsigned int) input_get_drvdata(input_dev); input_unregister_device(input_dev); platform_set_drvdata(dev, NULL); /* turn the speaker off */ disable_irq(IRQ_IXP4XX_TIMER2); ixp4xx_spkr_control(pin, 0); free_irq(IRQ_IXP4XX_TIMER2, dev); return 0; } static void ixp4xx_spkr_shutdown(struct platform_device *dev) { struct input_dev *input_dev = platform_get_drvdata(dev); unsigned int pin = (unsigned int) input_get_drvdata(input_dev); /* turn off the speaker */ disable_irq(IRQ_IXP4XX_TIMER2); ixp4xx_spkr_control(pin, 0); } static struct platform_driver ixp4xx_spkr_platform_driver = { .driver = { .name = "ixp4xx-beeper", .owner = THIS_MODULE, }, .probe = ixp4xx_spkr_probe, .remove = ixp4xx_spkr_remove, .shutdown = ixp4xx_spkr_shutdown, }; module_platform_driver(ixp4xx_spkr_platform_driver);
gpl-2.0
BlackBox-Kernel/blackbox_tomato_lp
net/dns_resolver/dns_key.c
2246
7934
/* Key type used to cache DNS lookups made by the kernel * * See Documentation/networking/dns_resolver.txt * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/keyctl.h> #include <linux/err.h> #include <linux/seq_file.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" MODULE_DESCRIPTION("DNS Resolver"); MODULE_AUTHOR("Wang Lei"); MODULE_LICENSE("GPL"); unsigned int dns_resolver_debug; module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); const struct cred *dns_resolver_cache; #define DNS_ERRORNO_OPTION "dnserror" /* * Instantiate a user defined key for dns_resolver. * * The data must be a NUL-terminated string, with the NUL char accounted in * datalen. * * If the data contains a '#' characters, then we take the clause after each * one to be an option of the form 'key=value'. The actual data of interest is * the string leading up to the first '#'. 
For instance: * * "ip1,ip2,...#foo=bar" */ static int dns_resolver_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct user_key_payload *upayload; unsigned long derrno; int ret; size_t datalen = prep->datalen, result_len = 0; const char *data = prep->data, *end, *opt; kenter("%%%d,%s,'%*.*s',%zu", key->serial, key->description, (int)datalen, (int)datalen, data, datalen); if (datalen <= 1 || !data || data[datalen - 1] != '\0') return -EINVAL; datalen--; /* deal with any options embedded in the data */ end = data + datalen; opt = memchr(data, '#', datalen); if (!opt) { /* no options: the entire data is the result */ kdebug("no options"); result_len = datalen; } else { const char *next_opt; result_len = opt - data; opt++; kdebug("options: '%s'", opt); do { const char *eq; int opt_len, opt_nlen, opt_vlen, tmp; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; if (!opt_len) { printk(KERN_WARNING "Empty option to dns_resolver key %d\n", key->serial); return -EINVAL; } eq = memchr(opt, '=', opt_len) ?: end; opt_nlen = eq - opt; eq++; opt_vlen = next_opt - eq; /* will be -1 if no value */ tmp = opt_vlen >= 0 ? opt_vlen : 0; kdebug("option '%*.*s' val '%*.*s'", opt_nlen, opt_nlen, opt, tmp, tmp, eq); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); if (opt_vlen <= 0) goto bad_option_value; ret = strict_strtoul(eq, 10, &derrno); if (ret < 0) goto bad_option_value; if (derrno < 1 || derrno > 511) goto bad_option_value; kdebug("dns error no. 
= %lu", derrno); key->type_data.x[0] = -derrno; continue; } bad_option_value: printk(KERN_WARNING "Option '%*.*s' to dns_resolver key %d:" " bad/missing value\n", opt_nlen, opt_nlen, opt, key->serial); return -EINVAL; } while (opt = next_opt + 1, opt < end); } /* don't cache the result if we're caching an error saying there's no * result */ if (key->type_data.x[0]) { kleave(" = 0 [h_error %ld]", key->type_data.x[0]); return 0; } kdebug("store result"); ret = key_payload_reserve(key, result_len); if (ret < 0) return -EINVAL; upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL); if (!upayload) { kleave(" = -ENOMEM"); return -ENOMEM; } upayload->datalen = result_len; memcpy(upayload->data, data, result_len); upayload->data[result_len] = '\0'; rcu_assign_pointer(key->payload.data, upayload); kleave(" = 0"); return 0; } /* * The description is of the form "[<type>:]<domain_name>" * * The domain name may be a simple name or an absolute domain name (which * should end with a period). The domain name is case-independent. 
*/ static int dns_resolver_match(const struct key *key, const void *description) { int slen, dlen, ret = 0; const char *src = key->description, *dsp = description; kenter("%s,%s", src, dsp); if (!src || !dsp) goto no_match; if (strcasecmp(src, dsp) == 0) goto matched; slen = strlen(src); dlen = strlen(dsp); if (slen <= 0 || dlen <= 0) goto no_match; if (src[slen - 1] == '.') slen--; if (dsp[dlen - 1] == '.') dlen--; if (slen != dlen || strncasecmp(src, dsp, slen) != 0) goto no_match; matched: ret = 1; no_match: kleave(" = %d", ret); return ret; } /* * Describe a DNS key */ static void dns_resolver_describe(const struct key *key, struct seq_file *m) { int err = key->type_data.x[0]; seq_puts(m, key->description); if (key_is_instantiated(key)) { if (err) seq_printf(m, ": %d", err); else seq_printf(m, ": %u", key->datalen); } } /* * read the DNS data * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, char __user *buffer, size_t buflen) { if (key->type_data.x[0]) return key->type_data.x[0]; return user_read(key, buffer, buflen); } struct key_type key_type_dns_resolver = { .name = "dns_resolver", .instantiate = dns_resolver_instantiate, .match = dns_resolver_match, .revoke = user_revoke, .destroy = user_destroy, .describe = dns_resolver_describe, .read = dns_resolver_read, }; static int __init init_dns_resolver(void) { struct cred *cred; struct key *keyring; int ret; /* create an override credential set with a special thread keyring in * which DNS requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(NULL); if (!cred) return -ENOMEM; keyring = keyring_alloc(".dns_resolver", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&key_type_dns_resolver); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; dns_resolver_cache = cred; kdebug("DNS resolver keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } static void __exit exit_dns_resolver(void) { key_revoke(dns_resolver_cache->thread_keyring); unregister_key_type(&key_type_dns_resolver); put_cred(dns_resolver_cache); } module_init(init_dns_resolver) module_exit(exit_dns_resolver) MODULE_LICENSE("GPL");
gpl-2.0
androidarmv6/android_kernel_samsung_msm
drivers/gpu/drm/tdfx/tdfx_drv.c
2502
2569
/* tdfx_drv.c -- tdfx driver -*- linux-c -*- * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. 
(Rik) Faith <faith@valinux.com> * Daryll Strauss <daryll@valinux.com> * Gareth Hughes <gareth@valinux.com> */ #include "drmP.h" #include "tdfx_drv.h" #include "drm_pciids.h" static struct pci_device_id pciidlist[] = { tdfx_PCI_IDS }; static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, .reclaim_buffers = drm_core_reclaim_buffers, .fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, .llseek = noop_llseek, }, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver tdfx_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, }; static int __init tdfx_init(void) { return drm_pci_init(&driver, &tdfx_pci_driver); } static void __exit tdfx_exit(void) { drm_pci_exit(&driver, &tdfx_pci_driver); } module_init(tdfx_init); module_exit(tdfx_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
wisniew/Harfix3
arch/sh/drivers/dma/dmabrg.c
3782
5336
/* * SH7760 DMABRG IRQ handling * * (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <mlau@msc-ge.com> * licensed under the GPLv2. * */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> #include <asm/dma.h> #include <asm/dmabrg.h> #include <asm/io.h> /* * The DMABRG is a special DMA unit within the SH7760. It does transfers * from USB-SRAM/Audio units to main memory (and also the LCDC; but that * part is sensibly placed in the LCDC registers and requires no irqs) * It has 3 IRQ lines which trigger 10 events, and works independently * from the traditional SH DMAC (although it blocks usage of DMAC 0) * * BRGIRQID | component | dir | meaning | source * ----------------------------------------------------- * 0 | USB-DMA | ... | xfer done | DMABRGI1 * 1 | USB-UAE | ... | USB addr err.| DMABRGI0 * 2 | HAC0/SSI0 | play| all done | DMABRGI1 * 3 | HAC0/SSI0 | play| half done | DMABRGI2 * 4 | HAC0/SSI0 | rec | all done | DMABRGI1 * 5 | HAC0/SSI0 | rec | half done | DMABRGI2 * 6 | HAC1/SSI1 | play| all done | DMABRGI1 * 7 | HAC1/SSI1 | play| half done | DMABRGI2 * 8 | HAC1/SSI1 | rec | all done | DMABRGI1 * 9 | HAC1/SSI1 | rec | half done | DMABRGI2 * * all can be enabled/disabled in the DMABRGCR register, * as well as checked if they occurred. * * DMABRGI0 services USB DMA Address errors, but it still must be * enabled/acked in the DMABRGCR register. USB-DMA complete indicator * is grouped together with the audio buffer end indicators, too bad... * * DMABRGCR: Bits 31-24: audio-dma ENABLE flags, * Bits 23-16: audio-dma STATUS flags, * Bits 9-8: USB error/xfer ENABLE, * Bits 1-0: USB error/xfer STATUS. * Ack an IRQ by writing 0 to the STATUS flag. * Mask IRQ by writing 0 to ENABLE flag. 
* * Usage is almost like with any other IRQ: * dmabrg_request_irq(BRGIRQID, handler, data) * dmabrg_free_irq(BRGIRQID) * * handler prototype: void brgirqhandler(void *data) */ #define DMARSRA 0xfe090000 #define DMAOR 0xffa00040 #define DMACHCR0 0xffa0000c #define DMABRGCR 0xfe3c0000 #define DMAOR_BRG 0x0000c000 #define DMAOR_DMEN 0x00000001 #define DMABRGI0 68 #define DMABRGI1 69 #define DMABRGI2 70 struct dmabrg_handler { void (*handler)(void *); void *data; } *dmabrg_handlers; static inline void dmabrg_call_handler(int i) { dmabrg_handlers[i].handler(dmabrg_handlers[i].data); } /* * main DMABRG irq handler. It acks irqs and then * handles every set and unmasked bit sequentially. * No locking and no validity checks; it should be * as fast as possible (audio!) */ static irqreturn_t dmabrg_irq(int irq, void *data) { unsigned long dcr; unsigned int i; dcr = __raw_readl(DMABRGCR); __raw_writel(dcr & ~0x00ff0003, DMABRGCR); /* ack all */ dcr &= dcr >> 8; /* ignore masked */ /* USB stuff, get it out of the way first */ if (dcr & 1) dmabrg_call_handler(DMABRGIRQ_USBDMA); if (dcr & 2) dmabrg_call_handler(DMABRGIRQ_USBDMAERR); /* Audio */ dcr >>= 16; while (dcr) { i = __ffs(dcr); dcr &= dcr - 1; dmabrg_call_handler(i + DMABRGIRQ_A0TXF); } return IRQ_HANDLED; } static void dmabrg_disable_irq(unsigned int dmairq) { unsigned long dcr; dcr = __raw_readl(DMABRGCR); dcr &= ~(1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8)); __raw_writel(dcr, DMABRGCR); } static void dmabrg_enable_irq(unsigned int dmairq) { unsigned long dcr; dcr = __raw_readl(DMABRGCR); dcr |= (1 << ((dmairq > 1) ? 
dmairq + 22 : dmairq + 8)); __raw_writel(dcr, DMABRGCR); } int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*), void *data) { if ((dmairq > 9) || !handler) return -ENOENT; if (dmabrg_handlers[dmairq].handler) return -EBUSY; dmabrg_handlers[dmairq].handler = handler; dmabrg_handlers[dmairq].data = data; dmabrg_enable_irq(dmairq); return 0; } EXPORT_SYMBOL_GPL(dmabrg_request_irq); void dmabrg_free_irq(unsigned int dmairq) { if (likely(dmairq < 10)) { dmabrg_disable_irq(dmairq); dmabrg_handlers[dmairq].handler = NULL; dmabrg_handlers[dmairq].data = NULL; } } EXPORT_SYMBOL_GPL(dmabrg_free_irq); static int __init dmabrg_init(void) { unsigned long or; int ret; dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler), GFP_KERNEL); if (!dmabrg_handlers) return -ENOMEM; #ifdef CONFIG_SH_DMA /* request DMAC channel 0 before anyone else can get it */ ret = request_dma(0, "DMAC 0 (DMABRG)"); if (ret < 0) printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n"); #endif __raw_writel(0, DMABRGCR); __raw_writel(0, DMACHCR0); __raw_writel(0x94000000, DMARSRA); /* enable DMABRG in DMAC 0 */ /* enable DMABRG mode, enable the DMAC */ or = __raw_readl(DMAOR); __raw_writel(or | DMAOR_BRG | DMAOR_DMEN, DMAOR); ret = request_irq(DMABRGI0, dmabrg_irq, IRQF_DISABLED, "DMABRG USB address error", NULL); if (ret) goto out0; ret = request_irq(DMABRGI1, dmabrg_irq, IRQF_DISABLED, "DMABRG Transfer End", NULL); if (ret) goto out1; ret = request_irq(DMABRGI2, dmabrg_irq, IRQF_DISABLED, "DMABRG Transfer Half", NULL); if (ret == 0) return ret; free_irq(DMABRGI1, 0); out1: free_irq(DMABRGI0, 0); out0: kfree(dmabrg_handlers); return ret; } subsys_initcall(dmabrg_init);
gpl-2.0
AAccount/android_kernel_samsung_p4
arch/ia64/sn/pci/pci_dma.c
4038
13082
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. * * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for * a description of how these routines should be used. */ #include <linux/gfp.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/sn/intr.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/sn_sal.h> #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) /** * sn_dma_supported - test a DMA mask * @dev: device to test * @mask: DMA mask to test * * Return whether the given PCI device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during PCI bus mastering, then you would pass 0x00ffffff as the mask to * this function. Of course, SN only supports devices that have 32 or more * address bits when using the PMU. */ static int sn_dma_supported(struct device *dev, u64 mask) { BUG_ON(dev->bus != &pci_bus_type); if (mask < 0x7fffffff) return 0; return 1; } /** * sn_dma_set_mask - set the DMA mask * @dev: device to set * @dma_mask: new mask * * Set @dev's DMA mask if the hw supports it. */ int sn_dma_set_mask(struct device *dev, u64 dma_mask) { BUG_ON(dev->bus != &pci_bus_type); if (!sn_dma_supported(dev, dma_mask)) return 0; *dev->dma_mask = dma_mask; return 1; } EXPORT_SYMBOL(sn_dma_set_mask); /** * sn_dma_alloc_coherent - allocate memory for coherent DMA * @dev: device to allocate for * @size: size of the region * @dma_handle: DMA (bus) address * @flags: memory allocation flags * * dma_alloc_coherent() returns a pointer to a memory region suitable for * coherent DMA traffic to/from a PCI device. 
On SN platforms, this means * that @dma_handle will have the %PCIIO_DMA_CMD flag set. * * This interface is usually used for "command" streams (e.g. the command * queue for a SCSI controller). See Documentation/DMA-API.txt for * more information. */ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t flags) { void *cpuaddr; unsigned long phys_addr; int node; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); /* * Allocate the memory. */ node = pcibus_to_node(pdev->bus); if (likely(node >=0)) { struct page *p = alloc_pages_exact_node(node, flags, get_order(size)); if (likely(p)) cpuaddr = page_address(p); else return NULL; } else cpuaddr = (void *)__get_free_pages(flags, get_order(size)); if (unlikely(!cpuaddr)) return NULL; memset(cpuaddr, 0x0, size); /* physical addr. of the memory we just got */ phys_addr = __pa(cpuaddr); /* * 64 bit address translations should never fail. * 32 bit translations can fail if there are insufficient mapping * resources. */ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); if (!*dma_handle) { printk(KERN_ERR "%s: out of ATEs\n", __func__); free_pages((unsigned long)cpuaddr, get_order(size)); return NULL; } return cpuaddr; } /** * sn_pci_free_coherent - free memory associated with coherent DMAable region * @dev: device to free for * @size: size to free * @cpu_addr: kernel virtual address to free * @dma_handle: DMA address associated with this region * * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping * any associated IOMMU mappings. 
*/ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); provider->dma_unmap(pdev, dma_handle, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } /** * sn_dma_map_single_attrs - map a single page for DMA * @dev: device to map for * @cpu_addr: kernel virtual address of the region to map * @size: size of the region * @direction: DMA direction * @attrs: optional dma attributes * * Map the region pointed to by @cpu_addr for DMA and return the * DMA address. * * We map this to the one step pcibr_dmamap_trans interface rather than * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have * no way of saving the dmamap handle from the alloc to later free * (which is pretty much unacceptable). * * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with * dma_map_consistent() so that writes force a flush of pending DMA. * (See "SGI Altix Architecture Considerations for Linux Device Drivers", * Document Number: 007-4763-001) * * TODO: simplify our interface; * figure out how to save dmamap handle so can use two step. 
*/ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { void *cpu_addr = page_address(page) + offset; dma_addr_t dma_addr; unsigned long phys_addr; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int dmabarr; dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(dev->bus != &pci_bus_type); phys_addr = __pa(cpu_addr); if (dmabarr) dma_addr = provider->dma_map_consistent(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); else dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); if (!dma_addr) { printk(KERN_ERR "%s: out of ATEs\n", __func__); return 0; } return dma_addr; } /** * sn_dma_unmap_single_attrs - unamp a DMA mapped page * @dev: device to sync * @dma_addr: DMA address to sync * @size: size of region * @direction: DMA direction * @attrs: optional dma attributes * * This routine is supposed to sync the DMA region specified * by @dma_handle into the coherence domain. On SN, we're always cache * coherent, so we just need to free any ATEs associated with this mapping. */ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); provider->dma_unmap(pdev, dma_addr, dir); } /** * sn_dma_unmap_sg - unmap a DMA scatterlist * @dev: device to unmap * @sg: scatterlist to unmap * @nhwentries: number of scatterlist entries * @direction: DMA direction * @attrs: optional dma attributes * * Unmap a set of streaming mode DMA translations. 
*/ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, struct dma_attrs *attrs) { int i; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); struct scatterlist *sg; BUG_ON(dev->bus != &pci_bus_type); for_each_sg(sgl, sg, nhwentries, i) { provider->dma_unmap(pdev, sg->dma_address, dir); sg->dma_address = (dma_addr_t) NULL; sg->dma_length = 0; } } /** * sn_dma_map_sg - map a scatterlist for DMA * @dev: device to map for * @sg: scatterlist to map * @nhwentries: number of entries * @direction: direction of the DMA transaction * @attrs: optional dma attributes * * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with * dma_map_consistent() so that writes force a flush of pending DMA. * (See "SGI Altix Architecture Considerations for Linux Device Drivers", * Document Number: 007-4763-001) * * Maps each entry of @sg for DMA. */ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, struct dma_attrs *attrs) { unsigned long phys_addr; struct scatterlist *saved_sg = sgl, *sg; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int i; int dmabarr; dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(dev->bus != &pci_bus_type); /* * Setup a DMA address for each entry in the scatterlist. */ for_each_sg(sgl, sg, nhwentries, i) { dma_addr_t dma_addr; phys_addr = SG_ENT_PHYS_ADDRESS(sg); if (dmabarr) dma_addr = provider->dma_map_consistent(pdev, phys_addr, sg->length, SN_DMA_ADDR_PHYS); else dma_addr = provider->dma_map(pdev, phys_addr, sg->length, SN_DMA_ADDR_PHYS); sg->dma_address = dma_addr; if (!sg->dma_address) { printk(KERN_ERR "%s: out of ATEs\n", __func__); /* * Free any successfully allocated entries. 
*/ if (i > 0) sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); return 0; } sg->dma_length = sg->length; } return nhwentries; } static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { BUG_ON(dev->bus != &pci_bus_type); } static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } u64 sn_dma_get_required_mask(struct device *dev) { return DMA_BIT_MASK(64); } EXPORT_SYMBOL_GPL(sn_dma_get_required_mask); char *sn_pci_get_legacy_mem(struct pci_bus *bus) { if (!SN_PCIBUS_BUSSOFT(bus)) return ERR_PTR(-ENODEV); return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET); } int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) { unsigned long addr; int ret; struct ia64_sal_retval isrv; /* * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work * around hw issues at the pci bus level. SGI proms older than * 4.10 don't implement this. */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, pci_domain_nr(bus), bus->number, 0, /* io */ 0, /* read */ port, size, __pa(val)); if (isrv.status == 0) return size; /* * If the above failed, retry using the SAL_PROBE call which should * be present in all proms (but which cannot work round PCI chipset * bugs). This code is retained for compatibility with old * pre-4.10 proms, and should be removed at some point in the future. 
*/ if (!SN_PCIBUS_BUSSOFT(bus)) return -ENODEV; addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; addr += port; ret = ia64_sn_probe_mem(addr, (long)size, (void *)val); if (ret == 2) return -EINVAL; if (ret == 1) *val = -1; return size; } int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) { int ret = size; unsigned long paddr; unsigned long *addr; struct ia64_sal_retval isrv; /* * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work * around hw issues at the pci bus level. SGI proms older than * 4.10 don't implement this. */ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, pci_domain_nr(bus), bus->number, 0, /* io */ 1, /* write */ port, size, __pa(&val)); if (isrv.status == 0) return size; /* * If the above failed, retry using the SAL_PROBE call which should * be present in all proms (but which cannot work round PCI chipset * bugs). This code is retained for compatibility with old * pre-4.10 proms, and should be removed at some point in the future. */ if (!SN_PCIBUS_BUSSOFT(bus)) { ret = -ENODEV; goto out; } /* Put the phys addr in uncached space */ paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET; paddr += port; addr = (unsigned long *)paddr; switch (size) { case 1: *(volatile u8 *)(addr) = (u8)(val); break; case 2: *(volatile u16 *)(addr) = (u16)(val); break; case 4: *(volatile u32 *)(addr) = (u32)(val); break; default: ret = -EINVAL; break; } out: return ret; } static struct dma_map_ops sn_dma_ops = { .alloc_coherent = sn_dma_alloc_coherent, .free_coherent = sn_dma_free_coherent, .map_page = sn_dma_map_page, .unmap_page = sn_dma_unmap_page, .map_sg = sn_dma_map_sg, .unmap_sg = sn_dma_unmap_sg, .sync_single_for_cpu = sn_dma_sync_single_for_cpu, .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, .sync_single_for_device = sn_dma_sync_single_for_device, .sync_sg_for_device = sn_dma_sync_sg_for_device, .mapping_error = sn_dma_mapping_error, .dma_supported = sn_dma_supported, }; void sn_dma_init(void) { dma_ops = 
&sn_dma_ops; }
gpl-2.0
georgewhr/dbwrt
drivers/net/wimax/i2400m/usb-notif.c
4294
8087
/* * Intel Wireless WiMAX Connection 2400m over USB * Notification handling * * * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * Intel Corporation <linux-wimax@intel.com> * Yanir Lubetkin <yanirx.lubetkin@intel.com> * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * - Initial implementation * * * The notification endpoint is active when the device is not in boot * mode; in here we just read and get notifications; based on those, * we act to either reinitialize the device after a reboot or to * submit a RX request. * * ROADMAP * * i2400mu_usb_notification_setup() * * i2400mu_usb_notification_release() * * i2400mu_usb_notification_cb() Called when a URB is ready * i2400mu_notif_grok() * i2400m_is_boot_barker() * i2400m_dev_reset_handle() * i2400mu_rx_kick() */ #include <linux/usb.h> #include <linux/slab.h> #include "i2400m-usb.h" #define D_SUBMODULE notif #include "usb-debug-levels.h" static const __le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 }; /* * Process a received notification * * In normal operation mode, we can only receive two types of payloads * on the notification endpoint: * * - a reboot barker, we do a bootstrap (the device has reseted). 
* * - a block of zeroes: there is pending data in the IN endpoint */ static int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf, size_t buf_len) { int ret; struct device *dev = &i2400mu->usb_iface->dev; struct i2400m *i2400m = &i2400mu->i2400m; d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n", i2400mu, buf, buf_len); ret = -EIO; if (buf_len < sizeof(i2400m_ZERO_BARKER)) /* Not a bug, just ignore */ goto error_bad_size; ret = 0; if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) { i2400mu_rx_kick(i2400mu); goto out; } ret = i2400m_is_boot_barker(i2400m, buf, buf_len); if (unlikely(ret >= 0)) ret = i2400m_dev_reset_handle(i2400m, "device rebooted"); else /* Unknown or unexpected data in the notif message */ i2400m_unknown_barker(i2400m, buf, buf_len); error_bad_size: out: d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n", i2400mu, buf, buf_len, ret); return ret; } /* * URB callback for the notification endpoint * * @urb: the urb received from the notification endpoint * * This function will just process the USB side of the transaction, * checking everything is fine, pass the processing to * i2400m_notification_grok() and resubmit the URB. */ static void i2400mu_notification_cb(struct urb *urb) { int ret; struct i2400mu *i2400mu = urb->context; struct device *dev = &i2400mu->usb_iface->dev; d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n", urb, urb->status, urb->actual_length); ret = urb->status; switch (ret) { case 0: ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer, urb->actual_length); if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) goto error_exceeded; if (ret == -ENOMEM) /* uff...power cycle? shutdown? */ goto error_exceeded; break; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* ditto */ case -ESHUTDOWN: /* URB killed */ case -ECONNRESET: /* disconnection */ goto out; /* Notify around */ default: /* Some error? 
*/ if (edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) goto error_exceeded; dev_err(dev, "notification: URB error %d, retrying\n", urb->status); } usb_mark_last_busy(i2400mu->usb_dev); ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC); switch (ret) { case 0: case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* ditto */ case -ESHUTDOWN: /* URB killed */ case -ECONNRESET: /* disconnection */ break; /* just ignore */ default: /* Some error? */ dev_err(dev, "notification: cannot submit URB: %d\n", ret); goto error_submit; } d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", urb, urb->status, urb->actual_length); return; error_exceeded: dev_err(dev, "maximum errors in notification URB exceeded; " "resetting device\n"); error_submit: usb_queue_reset_device(i2400mu->usb_iface); out: d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", urb, urb->status, urb->actual_length); } /* * setup the notification endpoint * * @i2400m: device descriptor * * This procedure prepares the notification urb and handler for receiving * unsolicited barkers from the device. 
*/ int i2400mu_notification_setup(struct i2400mu *i2400mu) { struct device *dev = &i2400mu->usb_iface->dev; int usb_pipe, ret = 0; struct usb_endpoint_descriptor *epd; char *buf; d_fnstart(4, dev, "(i2400m %p)\n", i2400mu); buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA); if (buf == NULL) { ret = -ENOMEM; goto error_buf_alloc; } i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL); if (!i2400mu->notif_urb) { ret = -ENOMEM; dev_err(dev, "notification: cannot allocate URB\n"); goto error_alloc_urb; } epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.notification); usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe, buf, I2400MU_MAX_NOTIFICATION_LEN, i2400mu_notification_cb, i2400mu, epd->bInterval); ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL); if (ret != 0) { dev_err(dev, "notification: cannot submit URB: %d\n", ret); goto error_submit; } d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); return ret; error_submit: usb_free_urb(i2400mu->notif_urb); error_alloc_urb: kfree(buf); error_buf_alloc: d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); return ret; } /* * Tear down of the notification mechanism * * @i2400m: device descriptor * * Kill the interrupt endpoint urb, free any allocated resources. * * We need to check if we have done it before as for example, * _suspend() call this; if after a suspend() we get a _disconnect() * (as the case is when hibernating), nothing bad happens. */ void i2400mu_notification_release(struct i2400mu *i2400mu) { struct device *dev = &i2400mu->usb_iface->dev; d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); if (i2400mu->notif_urb != NULL) { usb_kill_urb(i2400mu->notif_urb); kfree(i2400mu->notif_urb->transfer_buffer); usb_free_urb(i2400mu->notif_urb); i2400mu->notif_urb = NULL; } d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); }
gpl-2.0
kevin-a-naude/linux
arch/m32r/kernel/module.c
4550
6308
/* Kernel module help for M32R.

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

/*
 * Copy one 32-bit word between *sw and *tw while honouring the target's
 * alignment (align == address & 3): 0 -> single word copy, 2 -> two
 * half-word copies, otherwise four byte copies.  Relocation targets in
 * module text/data may be unaligned, so a plain word store is not safe.
 */
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
	void *__s = &(sw), *__t = &(tw); \
	unsigned short *__s2 = __s, *__t2 =__t; \
	unsigned char *__s1 = __s, *__t1 =__t; \
	switch ((align)) \
	{ \
	case 0: \
		*(unsigned long *) __t = *(unsigned long *) __s; \
		break; \
	case 2: \
		*__t2++ = *__s2++; \
		*__t2 = *__s2; \
		break; \
	default: \
		*__t1++ = *__s1++; \
		*__t1++ = *__s1++; \
		*__t1++ = *__s1++; \
		*__t1 = *__s1; \
		break; \
	} \
}

/*
 * Same idea for a 16-bit half-word: align 0 -> single half-word copy,
 * anything else -> two byte copies.
 */
#define COPY_UNALIGNED_HWORD(sw, tw, align) \
{ \
	void *__s = &(sw), *__t = &(tw); \
	unsigned short *__s2 = __s, *__t2 =__t; \
	unsigned char *__s1 = __s, *__t1 =__t; \
	switch ((align)) \
	{ \
	case 0: \
		*__t2 = *__s2; \
		break; \
	default: \
		*__t1++ = *__s1++; \
		*__t1 = *__s1; \
		break; \
	} \
}

/*
 * Apply RELA relocations of section @relsec to an M32R module being
 * loaded.  For each entry the symbol value plus addend is patched into
 * the instruction/data word at r_offset, using the unaligned-copy
 * macros above because the target may not be word aligned.
 *
 * Returns 0 on success, -ENOEXEC on a PC-relative overflow, a non-zero
 * relocation field where RELA requires zero, or an unknown relocation
 * type.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation;
	uint32_t *location;
	uint32_t value;
	unsigned short *hlocation;
	unsigned short hvalue;
	int svalue;
	int align;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		relocation = sym->st_value + rel[i].r_addend;
		/* low two bits of the target address select the copy mode */
		align = (int)location & 3;

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_M32R_32_RELA:
			/* plain 32-bit absolute relocation */
			COPY_UNALIGNED_WORD (*location, value, align);
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_HI16_ULO_RELA:
			/* high 16 bits, paired with an unsigned low part */
			COPY_UNALIGNED_WORD (*location, value, align);
			relocation = (relocation >>16) & 0xffff;
			/* RELA must have 0 in the relocation field. */
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_HI16_SLO_RELA:
			/* high 16 bits, carry-adjusted for a signed low part */
			COPY_UNALIGNED_WORD (*location, value, align);
			if (relocation & 0x8000) relocation += 0x10000;
			relocation = (relocation >>16) & 0xffff;
			/* RELA must have 0 in the relocation field. */
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_16_RELA:
			/* 16-bit absolute relocation */
			hlocation = (unsigned short *)location;
			relocation = relocation & 0xffff;
			/* RELA must have 0 in the relocation field. */
			hvalue = relocation;
			/* NOTE(review): this uses the WORD macro on a 16-bit
			 * field (hvalue/*hlocation are shorts) — looks like it
			 * should be COPY_UNALIGNED_HWORD; confirm against the
			 * M32R ABI before changing. */
			COPY_UNALIGNED_WORD (hvalue, *hlocation, align);
			break;
		case R_M32R_SDA16_RELA:
		case R_M32R_LO16_RELA:
			/* low 16 bits (and small-data) relocations */
			COPY_UNALIGNED_WORD (*location, value, align);
			relocation = relocation & 0xffff;
			/* RELA must have 0 in the relocation field. */
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_24_RELA:
			/* 24-bit absolute relocation */
			COPY_UNALIGNED_WORD (*location, value, align);
			relocation = relocation & 0xffffff;
			/* RELA must have 0 in the relocation field. */
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_18_PCREL_RELA:
			/* 18-bit PC-relative branch, stored >> 2 */
			relocation = (relocation - (Elf32_Addr) location);
			/* NOTE(review): relocation is unsigned (Elf32_Addr),
			 * so comparing against -0x20000 happens in unsigned
			 * arithmetic; verify this range check really accepts
			 * in-range negative offsets — confirm on target. */
			if (relocation < -0x20000 || 0x1fffc < relocation) {
				printk(KERN_ERR "module %s: relocation overflow: %u\n",
				       me->name, relocation);
				return -ENOEXEC;
			}
			COPY_UNALIGNED_WORD (*location, value, align);
			if (value & 0xffff) {
				/* RELA must have 0 in the relocation field. */
				printk(KERN_ERR "module %s: illegal relocation field: %u\n",
				       me->name, value);
				return -ENOEXEC;
			}
			relocation = (relocation >> 2) & 0xffff;
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		case R_M32R_10_PCREL_RELA:
			/* 10-bit PC-relative branch: 8-bit field, stored >> 2,
			 * added to the sign-extended existing displacement */
			hlocation = (unsigned short *)location;
			relocation = (relocation - (Elf32_Addr) location);
			COPY_UNALIGNED_HWORD (*hlocation, hvalue, align);
			svalue = (int)hvalue;
			svalue = (signed char)svalue << 2;
			relocation += svalue;
			relocation = (relocation >> 2) & 0xff;
			hvalue = hvalue & 0xff00;
			hvalue += relocation;
			COPY_UNALIGNED_HWORD (hvalue, *hlocation, align);
			break;
		case R_M32R_26_PCREL_RELA:
			/* 26-bit PC-relative branch, stored >> 2 */
			relocation = (relocation - (Elf32_Addr) location);
			/* NOTE(review): same unsigned-comparison concern as
			 * the 18-bit case above. */
			if (relocation < -0x2000000 || 0x1fffffc < relocation) {
				printk(KERN_ERR "module %s: relocation overflow: %u\n",
				       me->name, relocation);
				return -ENOEXEC;
			}
			COPY_UNALIGNED_WORD (*location, value, align);
			if (value & 0xffffff) {
				/* RELA must have 0 in the relocation field. */
				printk(KERN_ERR "module %s: illegal relocation field: %u\n",
				       me->name, value);
				return -ENOEXEC;
			}
			relocation = (relocation >> 2) & 0xffffff;
			value += relocation;
			COPY_UNALIGNED_WORD (value, *location, align);
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
gpl-2.0
pershoot/android_kernel_asus_tf701t
tools/perf/util/util.c
4806
2375
#include "../perf.h"
#include "util.h"
#include <sys/mman.h>

/*
 * XXX We need to find a better place for these things...
 */
bool perf_host = true;
bool perf_guest = false;

/*
 * Initialize the host/guest exclusion bits of @attr from the global
 * perf_host/perf_guest flags and record the attr ABI size this tool
 * was built against.
 */
void event_attr_init(struct perf_event_attr *attr)
{
	if (!perf_host)
		attr->exclude_host = 1;
	if (!perf_guest)
		attr->exclude_guest = 1;
	/* to capture ABI version */
	attr->size = sizeof(*attr);
}

/*
 * mkdir -p: create every missing component of the absolute path @path
 * with @mode.  Returns 0 on success, -1 on error (including a relative
 * path).  @path is temporarily modified (components NUL-terminated) but
 * restored before each mkdir() returns.
 */
int mkdir_p(char *path, mode_t mode)
{
	struct stat st;
	int err;
	char *d = path;

	if (*d != '/')
		return -1;

	if (stat(path, &st) == 0)
		return 0;

	while (*++d == '/');

	while ((d = strchr(d, '/'))) {
		*d = '\0';
		err = stat(path, &st) && mkdir(path, mode);
		*d++ = '/';
		if (err)
			return -1;
		while (*d == '/')
			++d;
	}
	return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}

/*
 * Line-by-line copy for pseudo files (e.g. /proc entries) whose stat
 * size is 0 and therefore cannot be mmap'ed.  Returns 0 on success,
 * -1 on failure.
 */
static int slow_copyfile(const char *from, const char *to)
{
	/*
	 * FIX: start pessimistic.  This was previously initialized to 0,
	 * which made the function report success when fopen() of either
	 * file failed, or when fputs() failed mid-copy (the error gotos
	 * skip the "err = 0" below but used to leave err at 0).
	 */
	int err = -1;
	char *line = NULL;
	size_t n;
	FILE *from_fp = fopen(from, "r"), *to_fp;

	if (from_fp == NULL)
		goto out;

	to_fp = fopen(to, "w");
	if (to_fp == NULL)
		goto out_fclose_from;

	while (getline(&line, &n, from_fp) > 0)
		if (fputs(line, to_fp) == EOF)
			goto out_fclose_to;
	err = 0;
out_fclose_to:
	fclose(to_fp);
	free(line);
out_fclose_from:
	fclose(from_fp);
out:
	return err;
}

/*
 * Copy @from to @to.  Regular files go through mmap()+write(); files
 * that stat as zero-sized (procfs-style) fall back to slow_copyfile().
 * Returns 0 on success, -1 on failure; a partially written target is
 * unlinked on error.
 */
int copyfile(const char *from, const char *to)
{
	int fromfd, tofd;
	struct stat st;
	void *addr;
	int err = -1;

	if (stat(from, &st))
		goto out;

	if (st.st_size == 0) /* /proc? do it slowly... */
		return slow_copyfile(from, to);

	fromfd = open(from, O_RDONLY);
	if (fromfd < 0)
		goto out;

	tofd = creat(to, 0755);
	if (tofd < 0)
		goto out_close_from;

	addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
	if (addr == MAP_FAILED)
		goto out_close_to;

	if (write(tofd, addr, st.st_size) == st.st_size)
		err = 0;

	munmap(addr, st.st_size);
out_close_to:
	close(tofd);
	if (err)
		unlink(to);
out_close_from:
	close(fromfd);
out:
	return err;
}

/*
 * Scale @value down by factors of 1000 and report the resulting unit
 * through @unit: ' ' (unscaled), 'K', 'M' or 'G'.
 */
unsigned long convert_unit(unsigned long value, char *unit)
{
	*unit = ' ';

	if (value > 1000) {
		value /= 1000;
		*unit = 'K';
	}

	if (value > 1000) {
		value /= 1000;
		*unit = 'M';
	}

	if (value > 1000) {
		value /= 1000;
		*unit = 'G';
	}

	return value;
}

/*
 * Read exactly @n bytes from @fd unless EOF or an error occurs first.
 * Returns the byte count on full success, otherwise the (<= 0) result
 * of the failing read() — note bytes read before the failure are lost
 * to the caller's accounting.
 */
int readn(int fd, void *buf, size_t n)
{
	void *buf_start = buf;

	while (n) {
		int ret = read(fd, buf, n);

		if (ret <= 0)
			return ret;

		n -= ret;
		buf += ret;
	}

	return buf - buf_start;
}
gpl-2.0
thypon/bowser-kernel
drivers/watchdog/imx2_wdt.c
4806
9531
/*
 * Watchdog driver for IMX2 and later processors
 *
 * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
 *
 * some parts adapted by similar drivers from Darius Augulis and Vladimir
 * Zapolskiy, additional improvements by Wim Van Sebroeck.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * NOTE: MX1 has a slightly different Watchdog than MX2 and later:
 *
 *			MX1:		MX2+:
 *			----		-----
 * Registers:		32-bit		16-bit
 * Stopable timer:	Yes		No
 * Need to enable clk:	No		Yes
 * Halt on suspend:	Manual		Can be automatic
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <mach/hardware.h>

#define DRIVER_NAME "imx2-wdt"

/* Hardware register offsets and bits (16-bit registers on MX2+) */
#define IMX2_WDT_WCR		0x00		/* Control Register */
#define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
#define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
#define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */

#define IMX2_WDT_WSR		0x02		/* Service Register */
#define IMX2_WDT_SEQ1		0x5555		/* -> service sequence 1 */
#define IMX2_WDT_SEQ2		0xAAAA		/* -> service sequence 2 */

#define IMX2_WDT_WRSR		0x04		/* Reset Status Register */
#define IMX2_WDT_WRSR_TOUT	(1 << 1)	/* -> Reset due to Timeout */

#define IMX2_WDT_MAX_TIME	128
#define IMX2_WDT_DEFAULT_TIME	60		/* in seconds */

/* Convert seconds to the half-second count the WT field expects */
#define WDOG_SEC_TO_COUNT(s)	((s * 2 - 1) << 8)

/* Bit numbers in imx2_wdt.status */
#define IMX2_WDT_STATUS_OPEN	0
#define IMX2_WDT_STATUS_STARTED	1
#define IMX2_WDT_EXPECT_CLOSE	2

/* Single-instance driver state (this hardware has one watchdog) */
static struct {
	struct clk *clk;
	void __iomem *base;
	unsigned timeout;
	unsigned long status;
	struct timer_list timer;	/* Pings the watchdog when closed */
} imx2_wdt;

static struct miscdevice imx2_wdt_miscdev;

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
				__MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");

static const struct watchdog_info imx2_wdt_info = {
	.identity = "imx2+ watchdog",
	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
};

/*
 * One-time hardware setup: program the timeout, select reset-on-timeout,
 * then enable the watchdog with a second write (WDE must be set after
 * the other fields are configured).
 */
static inline void imx2_wdt_setup(void)
{
	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);

	/* Strip the old watchdog Time-Out value */
	val &= ~IMX2_WDT_WCR_WT;
	/* Generate reset if WDOG times out */
	val &= ~IMX2_WDT_WCR_WRE;
	/* Keep Watchdog Disabled */
	val &= ~IMX2_WDT_WCR_WDE;
	/* Set the watchdog's Time-Out value */
	val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout);

	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);

	/* enable the watchdog */
	val |= IMX2_WDT_WCR_WDE;
	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
}

/* Service (feed) the watchdog by writing the magic two-word sequence. */
static inline void imx2_wdt_ping(void)
{
	__raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR);
	__raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR);
}

/* Timer callback that keeps feeding the watchdog while the device is
 * closed (the MX2+ watchdog cannot be stopped once enabled). */
static void imx2_wdt_timer_ping(unsigned long arg)
{
	/* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */
	imx2_wdt_ping();
	mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2);
}

/* Start (or resume userspace ownership of) the watchdog. */
static void imx2_wdt_start(void)
{
	if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		/* at our first start we enable clock and do initialisations */
		clk_enable(imx2_wdt.clk);

		imx2_wdt_setup();
	} else	/* delete the timer that pings the watchdog after close */
		del_timer_sync(&imx2_wdt.timer);

	/* Watchdog is enabled - time to reload the timeout value */
	imx2_wdt_ping();
}

/* "Stop": the hardware cannot actually be disabled, so hand feeding
 * duty over to the kernel timer instead. */
static void imx2_wdt_stop(void)
{
	/* we don't need a clk_disable, it cannot be disabled once started.
	 * We use a timer to ping the watchdog while /dev/watchdog is closed */
	imx2_wdt_timer_ping(0);
}

/* Reprogram the WT field with a new timeout (in seconds). */
static void imx2_wdt_set_timeout(int new_timeout)
{
	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);

	/* set the new timeout value in the WSR */
	val &= ~IMX2_WDT_WCR_WT;
	val |= WDOG_SEC_TO_COUNT(new_timeout);
	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
}

/* Open /dev/watchdog: single-open semantics, starts the hardware. */
static int imx2_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status))
		return -EBUSY;

	imx2_wdt_start();
	return nonseekable_open(inode, file);
}

/* Release: honour the magic-close 'V' character unless nowayout. */
static int imx2_wdt_close(struct inode *inode, struct file *file)
{
	if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout)
		imx2_wdt_stop();
	else {
		dev_crit(imx2_wdt_miscdev.parent,
			"Unexpected close: Expect reboot!\n");
		imx2_wdt_ping();
	}

	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status);
	return 0;
}

/* Standard watchdog ioctl interface (see Documentation/watchdog). */
static long imx2_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_value;
	u16 val;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &imx2_wdt_info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
		return put_user(0, p);

	case WDIOC_GETBOOTSTATUS:
		/* report whether the last reset was caused by a timeout */
		val = __raw_readw(imx2_wdt.base + IMX2_WDT_WRSR);
		new_value = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
		return put_user(new_value, p);

	case WDIOC_KEEPALIVE:
		imx2_wdt_ping();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;
		if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME))
			return -EINVAL;
		imx2_wdt_set_timeout(new_value);
		imx2_wdt.timeout = new_value;
		imx2_wdt_ping();

		/* Fallthrough to return current value */
	case WDIOC_GETTIMEOUT:
		return put_user(imx2_wdt.timeout, p);

	default:
		return -ENOTTY;
	}
}

/* Write handler: any write feeds the dog; 'V' arms the magic close. */
static ssize_t imx2_wdt_write(struct file *file, const char __user *data,
						size_t len, loff_t *ppos)
{
	size_t i;
	char c;

	if (len == 0)	/* Can we see this even ? */
		return 0;

	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	}

	imx2_wdt_ping();
	return len;
}

static const struct file_operations imx2_wdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = imx2_wdt_ioctl,
	.open = imx2_wdt_open,
	.release = imx2_wdt_close,
	.write = imx2_wdt_write,
};

static struct miscdevice imx2_wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &imx2_wdt_fops,
};

/* Probe: map registers, grab the clock, clamp the timeout parameter and
 * register the misc device. */
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get device resources\n");
		return -ENODEV;
	}

	imx2_wdt.base = devm_request_and_ioremap(&pdev->dev, res);
	if (!imx2_wdt.base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	imx2_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(imx2_wdt.clk)) {
		dev_err(&pdev->dev, "can't get Watchdog clock\n");
		return PTR_ERR(imx2_wdt.clk);
	}

	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
	if (imx2_wdt.timeout != timeout)
		dev_warn(&pdev->dev, "Initial timeout out of range! "
			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);

	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);

	imx2_wdt_miscdev.parent = &pdev->dev;
	ret = misc_register(&imx2_wdt_miscdev);
	if (ret)
		goto fail;

	dev_info(&pdev->dev, "IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
		imx2_wdt.timeout, nowayout);
	return 0;

fail:
	imx2_wdt_miscdev.parent = NULL;
	clk_put(imx2_wdt.clk);
	return ret;
}

/* Remove: if started, the hardware keeps running — warn loudly. */
static int __exit imx2_wdt_remove(struct platform_device *pdev)
{
	misc_deregister(&imx2_wdt_miscdev);

	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		del_timer_sync(&imx2_wdt.timer);

		dev_crit(imx2_wdt_miscdev.parent,
			"Device removed: Expect reboot!\n");
	} else
		clk_put(imx2_wdt.clk);

	imx2_wdt_miscdev.parent = NULL;
	return 0;
}

/* Shutdown: stretch the timeout to the maximum to give the system the
 * longest possible window before the unavoidable reset. */
static void imx2_wdt_shutdown(struct platform_device *pdev)
{
	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		/* we are running, we need to delete the timer but will give
		 * max timeout before reboot will take place */
		del_timer_sync(&imx2_wdt.timer);
		imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME);
		imx2_wdt_ping();

		dev_crit(imx2_wdt_miscdev.parent,
			"Device shutdown: Expect reboot!\n");
	}
}

static const struct of_device_id imx2_wdt_dt_ids[] = {
	{ .compatible = "fsl,imx21-wdt", },
	{ /* sentinel */ }
};

static struct platform_driver imx2_wdt_driver = {
	.remove		= __exit_p(imx2_wdt_remove),
	.shutdown	= imx2_wdt_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = imx2_wdt_dt_ids,
	},
};

static int __init imx2_wdt_init(void)
{
	return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe);
}
module_init(imx2_wdt_init);

static void __exit imx2_wdt_exit(void)
{
	platform_driver_unregister(&imx2_wdt_driver);
}
module_exit(imx2_wdt_exit);

MODULE_AUTHOR("Wolfram Sang");
MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
ShadySquirrel/e980-zeKrnl
drivers/isdn/mISDN/layer2.c
4806
51074
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/mISDNif.h> #include <linux/slab.h> #include "core.h" #include "fsm.h" #include "layer2.h" static u_int *debug; static struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL}; static char *strL2State[] = { "ST_L2_1", "ST_L2_2", "ST_L2_3", "ST_L2_4", "ST_L2_5", "ST_L2_6", "ST_L2_7", "ST_L2_8", }; enum { EV_L2_UI, EV_L2_SABME, EV_L2_DISC, EV_L2_DM, EV_L2_UA, EV_L2_FRMR, EV_L2_SUPER, EV_L2_I, EV_L2_DL_DATA, EV_L2_ACK_PULL, EV_L2_DL_UNITDATA, EV_L2_DL_ESTABLISH_REQ, EV_L2_DL_RELEASE_REQ, EV_L2_MDL_ASSIGN, EV_L2_MDL_REMOVE, EV_L2_MDL_ERROR, EV_L1_DEACTIVATE, EV_L2_T200, EV_L2_T203, EV_L2_SET_OWN_BUSY, EV_L2_CLEAR_OWN_BUSY, EV_L2_FRAME_ERROR, }; #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1) static char *strL2Event[] = { "EV_L2_UI", "EV_L2_SABME", "EV_L2_DISC", "EV_L2_DM", "EV_L2_UA", "EV_L2_FRMR", "EV_L2_SUPER", "EV_L2_I", "EV_L2_DL_DATA", "EV_L2_ACK_PULL", "EV_L2_DL_UNITDATA", "EV_L2_DL_ESTABLISH_REQ", "EV_L2_DL_RELEASE_REQ", "EV_L2_MDL_ASSIGN", "EV_L2_MDL_REMOVE", "EV_L2_MDL_ERROR", "EV_L1_DEACTIVATE", "EV_L2_T200", "EV_L2_T203", "EV_L2_SET_OWN_BUSY", "EV_L2_CLEAR_OWN_BUSY", "EV_L2_FRAME_ERROR", }; static void l2m_debug(struct FsmInst *fi, char *fmt, ...) 
{ struct layer2 *l2 = fi->userdata; struct va_format vaf; va_list va; if (!(*debug & DEBUG_L2_FSM)) return; va_start(va, fmt); vaf.fmt = fmt; vaf.va = &va; printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n", l2->sapi, l2->tei, &vaf); va_end(va); } inline u_int l2headersize(struct layer2 *l2, int ui) { return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) + (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1); } inline u_int l2addrsize(struct layer2 *l2) { return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1; } static u_int l2_newid(struct layer2 *l2) { u_int id; id = l2->next_id++; if (id == 0x7fff) l2->next_id = 1; id <<= 16; id |= l2->tei << 8; id |= l2->sapi; return id; } static void l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb) { int err; if (!l2->up) return; mISDN_HEAD_PRIM(skb) = prim; mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; err = l2->up->send(l2->up, skb); if (err) { printk(KERN_WARNING "%s: err=%d\n", __func__, err); dev_kfree_skb(skb); } } static void l2up_create(struct layer2 *l2, u_int prim, int len, void *arg) { struct sk_buff *skb; struct mISDNhead *hh; int err; if (!l2->up) return; skb = mI_alloc_skb(len, GFP_ATOMIC); if (!skb) return; hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = (l2->ch.nr << 16) | l2->ch.addr; if (len) memcpy(skb_put(skb, len), arg, len); err = l2->up->send(l2->up, skb); if (err) { printk(KERN_WARNING "%s: err=%d\n", __func__, err); dev_kfree_skb(skb); } } static int l2down_skb(struct layer2 *l2, struct sk_buff *skb) { int ret; ret = l2->ch.recv(l2->ch.peer, skb); if (ret && (*debug & DEBUG_L2_RECV)) printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret); return ret; } static int l2down_raw(struct layer2 *l2, struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); if (hh->prim == PH_DATA_REQ) { if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) { skb_queue_tail(&l2->down_queue, skb); return 0; } l2->down_id = mISDN_HEAD_ID(skb); } return l2down_skb(l2, skb); } static int l2down(struct layer2 *l2, u_int prim, u_int id, 
struct sk_buff *skb) { struct mISDNhead *hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; return l2down_raw(l2, skb); } static int l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg) { struct sk_buff *skb; int err; struct mISDNhead *hh; skb = mI_alloc_skb(len, GFP_ATOMIC); if (!skb) return -ENOMEM; hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; if (len) memcpy(skb_put(skb, len), arg, len); err = l2down_raw(l2, skb); if (err) dev_kfree_skb(skb); return err; } static int ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) { struct sk_buff *nskb = skb; int ret = -EAGAIN; if (test_bit(FLG_L1_NOTREADY, &l2->flag)) { if (hh->id == l2->down_id) { nskb = skb_dequeue(&l2->down_queue); if (nskb) { l2->down_id = mISDN_HEAD_ID(nskb); if (l2down_skb(l2, nskb)) { dev_kfree_skb(nskb); l2->down_id = MISDN_ID_NONE; } } else l2->down_id = MISDN_ID_NONE; if (ret) { dev_kfree_skb(skb); ret = 0; } if (l2->down_id == MISDN_ID_NONE) { test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); } } } if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) { nskb = skb_dequeue(&l2->down_queue); if (nskb) { l2->down_id = mISDN_HEAD_ID(nskb); if (l2down_skb(l2, nskb)) { dev_kfree_skb(nskb); l2->down_id = MISDN_ID_NONE; test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); } } else test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag); } return ret; } static int l2mgr(struct layer2 *l2, u_int prim, void *arg) { long c = (long)arg; printk(KERN_WARNING "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c); if (test_bit(FLG_LAPD, &l2->flag) && !test_bit(FLG_FIXED_TEI, &l2->flag)) { switch (c) { case 'C': case 'D': case 'G': case 'H': l2_tei(l2, prim, (u_long)arg); break; } } return 0; } static void set_peer_busy(struct layer2 *l2) { test_and_set_bit(FLG_PEER_BUSY, &l2->flag); if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue)) test_and_set_bit(FLG_L2BLOCK, &l2->flag); } static void clear_peer_busy(struct 
layer2 *l2) { if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag)) test_and_clear_bit(FLG_L2BLOCK, &l2->flag); } static void InitWin(struct layer2 *l2) { int i; for (i = 0; i < MAX_WINDOW; i++) l2->windowar[i] = NULL; } static int freewin(struct layer2 *l2) { int i, cnt = 0; for (i = 0; i < MAX_WINDOW; i++) { if (l2->windowar[i]) { cnt++; dev_kfree_skb(l2->windowar[i]); l2->windowar[i] = NULL; } } return cnt; } static void ReleaseWin(struct layer2 *l2) { int cnt = freewin(l2); if (cnt) printk(KERN_WARNING "isdnl2 freed %d skbuffs in release\n", cnt); } inline unsigned int cansend(struct layer2 *l2) { unsigned int p1; if (test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag); } inline void clear_exception(struct layer2 *l2) { test_and_clear_bit(FLG_ACK_PEND, &l2->flag); test_and_clear_bit(FLG_REJEXC, &l2->flag); test_and_clear_bit(FLG_OWN_BUSY, &l2->flag); clear_peer_busy(l2); } static int sethdraddr(struct layer2 *l2, u_char *header, int rsp) { u_char *ptr = header; int crbit = rsp; if (test_bit(FLG_LAPD, &l2->flag)) { if (test_bit(FLG_LAPD_NET, &l2->flag)) crbit = !crbit; *ptr++ = (l2->sapi << 2) | (crbit ? 
2 : 0); *ptr++ = (l2->tei << 1) | 1; return 2; } else { if (test_bit(FLG_ORIG, &l2->flag)) crbit = !crbit; if (crbit) *ptr++ = l2->addr.B; else *ptr++ = l2->addr.A; return 1; } } static inline void enqueue_super(struct layer2 *l2, struct sk_buff *skb) { if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb)) dev_kfree_skb(skb); } static inline void enqueue_ui(struct layer2 *l2, struct sk_buff *skb) { if (l2->tm) l2_tei(l2, MDL_STATUS_UI_IND, 0); if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb)) dev_kfree_skb(skb); } inline int IsUI(u_char *data) { return (data[0] & 0xef) == UI; } inline int IsUA(u_char *data) { return (data[0] & 0xef) == UA; } inline int IsDM(u_char *data) { return (data[0] & 0xef) == DM; } inline int IsDISC(u_char *data) { return (data[0] & 0xef) == DISC; } inline int IsRR(u_char *data, struct layer2 *l2) { if (test_bit(FLG_MOD128, &l2->flag)) return data[0] == RR; else return (data[0] & 0xf) == 1; } inline int IsSFrame(u_char *data, struct layer2 *l2) { register u_char d = *data; if (!test_bit(FLG_MOD128, &l2->flag)) d &= 0xf; return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c); } inline int IsSABME(u_char *data, struct layer2 *l2) { u_char d = data[0] & ~0x10; return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM; } inline int IsREJ(u_char *data, struct layer2 *l2) { return test_bit(FLG_MOD128, &l2->flag) ? data[0] == REJ : (data[0] & 0xf) == REJ; } inline int IsFRMR(u_char *data) { return (data[0] & 0xef) == FRMR; } inline int IsRNR(u_char *data, struct layer2 *l2) { return test_bit(FLG_MOD128, &l2->flag) ? data[0] == RNR : (data[0] & 0xf) == RNR; } static int iframe_error(struct layer2 *l2, struct sk_buff *skb) { u_int i; int rsp = *skb->data & 0x2; i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 
2 : 1); if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len < i) return 'N'; if ((skb->len - i) > l2->maxlen) return 'O'; return 0; } static int super_error(struct layer2 *l2, struct sk_buff *skb) { if (skb->len != l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1)) return 'N'; return 0; } static int unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp) { int rsp = (*skb->data & 0x2) >> 1; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp != wantrsp) return 'L'; if (skb->len != l2addrsize(l2) + 1) return 'N'; return 0; } static int UI_error(struct layer2 *l2, struct sk_buff *skb) { int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (rsp) return 'L'; if (skb->len > l2->maxlen + l2addrsize(l2) + 1) return 'O'; return 0; } static int FRMR_error(struct layer2 *l2, struct sk_buff *skb) { u_int headers = l2addrsize(l2) + 1; u_char *datap = skb->data + headers; int rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; if (!rsp) return 'L'; if (test_bit(FLG_MOD128, &l2->flag)) { if (skb->len < headers + 5) return 'N'; else if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "FRMR information %2x %2x %2x %2x %2x", datap[0], datap[1], datap[2], datap[3], datap[4]); } else { if (skb->len < headers + 3) return 'N'; else if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "FRMR information %2x %2x %2x", datap[0], datap[1], datap[2]); } return 0; } static unsigned int legalnr(struct layer2 *l2, unsigned int nr) { if (test_bit(FLG_MOD128, &l2->flag)) return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128); else return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8); } static void setva(struct layer2 *l2, unsigned int nr) { struct sk_buff *skb; while (l2->va != nr) { l2->va++; if (test_bit(FLG_MOD128, &l2->flag)) l2->va %= 128; else l2->va %= 8; if (l2->windowar[l2->sow]) { skb_trim(l2->windowar[l2->sow], 0); skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]); l2->windowar[l2->sow] = NULL; } l2->sow 
= (l2->sow + 1) % l2->window; } skb = skb_dequeue(&l2->tmp_queue); while (skb) { dev_kfree_skb(skb); skb = skb_dequeue(&l2->tmp_queue); } } static void send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr) { u_char tmp[MAX_L2HEADER_LEN]; int i; i = sethdraddr(l2, tmp, cr); tmp[i++] = cmd; if (skb) skb_trim(skb, 0); else { skb = mI_alloc_skb(i, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "%s: can't alloc skbuff\n", __func__); return; } } memcpy(skb_put(skb, i), tmp, i); enqueue_super(l2, skb); } inline u_char get_PollFlag(struct layer2 *l2, struct sk_buff *skb) { return skb->data[l2addrsize(l2)] & 0x10; } inline u_char get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb) { u_char PF; PF = get_PollFlag(l2, skb); dev_kfree_skb(skb); return PF; } inline void start_t200(struct layer2 *l2, int i) { mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &l2->flag); } inline void restart_t200(struct layer2 *l2, int i) { mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i); test_and_set_bit(FLG_T200_RUN, &l2->flag); } inline void stop_t200(struct layer2 *l2, int i) { if (test_and_clear_bit(FLG_T200_RUN, &l2->flag)) mISDN_FsmDelTimer(&l2->t200, i); } inline void st5_dl_release_l2l3(struct layer2 *l2) { int pr; if (test_and_clear_bit(FLG_PEND_REL, &l2->flag)) pr = DL_RELEASE_CNF; else pr = DL_RELEASE_IND; l2up_create(l2, pr, 0, NULL); } inline void lapb_dl_release_l2l3(struct layer2 *l2, int f) { if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); l2up_create(l2, f, 0, NULL); } static void establishlink(struct FsmInst *fi) { struct layer2 *l2 = fi->userdata; u_char cmd; clear_exception(l2); l2->rc = 0; cmd = (test_bit(FLG_MOD128, &l2->flag) ? 
SABME : SABM) | 0x10; send_uframe(l2, NULL, cmd, CMD); mISDN_FsmDelTimer(&l2->t203, 1); restart_t200(l2, 1); test_and_clear_bit(FLG_PEND_REL, &l2->flag); freewin(l2); mISDN_FsmChangeState(fi, ST_L2_5); } static void l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'C'); else l2mgr(l2, MDL_ERROR_IND, (void *) 'D'); } static void l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'B'); else { l2mgr(l2, MDL_ERROR_IND, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } } static void l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; if (get_PollFlagFree(l2, skb)) l2mgr(l2, MDL_ERROR_IND, (void *) 'B'); else l2mgr(l2, MDL_ERROR_IND, (void *) 'E'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } static void l2_go_st3(struct FsmInst *fi, int event, void *arg) { dev_kfree_skb((struct sk_buff *)arg); mISDN_FsmChangeState(fi, ST_L2_3); } static void l2_mdl_assign(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; mISDN_FsmChangeState(fi, ST_L2_3); dev_kfree_skb((struct sk_buff *)arg); l2_tei(l2, MDL_ASSIGN_IND, 0); } static void l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); mISDN_FsmChangeState(fi, ST_L2_2); l2_tei(l2, MDL_ASSIGN_IND, 0); } static void l2_queue_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); } static void tx_ui(struct layer2 *l2) { struct sk_buff *skb; u_char header[MAX_L2HEADER_LEN]; int i; i = sethdraddr(l2, header, CMD); if 
(test_bit(FLG_LAPD_NET, &l2->flag)) header[1] = 0xff; /* tei 127 */ header[i++] = UI; while ((skb = skb_dequeue(&l2->ui_queue))) { memcpy(skb_push(skb, i), header, i); enqueue_ui(l2, skb); } } static void l2_send_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->ui_queue, skb); tx_ui(l2); } static void l2_got_ui(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2headersize(l2, 1)); /* * in states 1-3 for broadcast */ if (l2->tm) l2_tei(l2, MDL_STATUS_UI_IND, 0); l2up(l2, DL_UNITDATA_IND, skb); } static void l2_establish(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); dev_kfree_skb(skb); } static void l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); test_and_set_bit(FLG_L3_INIT, &l2->flag); test_and_clear_bit(FLG_PEND_REL, &l2->flag); dev_kfree_skb(skb); } static void l2_l3_reestablish(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); dev_kfree_skb(skb); } static void l2_release(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_trim(skb, 0); l2up(l2, DL_RELEASE_CNF, skb); } static void l2_pend_rel(struct FsmInst *fi, int event, void *arg) { struct sk_buff *skb = arg; struct layer2 *l2 = fi->userdata; test_and_set_bit(FLG_PEND_REL, &l2->flag); dev_kfree_skb(skb); } static void l2_disconnect(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); freewin(l2); mISDN_FsmChangeState(fi, ST_L2_6); l2->rc = 0; send_uframe(l2, 
NULL, DISC | 0x10, CMD); mISDN_FsmDelTimer(&l2->t203, 1); restart_t200(l2, 2); if (skb) dev_kfree_skb(skb); } static void l2_start_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; l2->vs = 0; l2->va = 0; l2->vr = 0; l2->sow = 0; clear_exception(l2); send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP); mISDN_FsmChangeState(fi, ST_L2_7); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3); skb_trim(skb, 0); l2up(l2, DL_ESTABLISH_IND, skb); if (l2->tm) l2_tei(l2, MDL_STATUS_UP_IND, 0); } static void l2_send_UA(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); } static void l2_send_DM(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP); } static void l2_restart_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int est = 0; send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); l2mgr(l2, MDL_ERROR_IND, (void *) 'F'); if (l2->vs != l2->va) { skb_queue_purge(&l2->i_queue); est = 1; } clear_exception(l2); l2->vs = 0; l2->va = 0; l2->vr = 0; l2->sow = 0; mISDN_FsmChangeState(fi, ST_L2_7); stop_t200(l2, 3); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3); if (est) l2up_create(l2, DL_ESTABLISH_IND, 0, NULL); /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST, * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED, * 0, NULL, 0); */ if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } static void l2_stop_multi(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; mISDN_FsmChangeState(fi, ST_L2_4); mISDN_FsmDelTimer(&l2->t203, 3); stop_t200(l2, 4); send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP); 
skb_queue_purge(&l2->i_queue); freewin(l2); lapb_dl_release_l2l3(l2, DL_RELEASE_IND); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_connected(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int pr = -1; if (!get_PollFlag(l2, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); if (test_and_clear_bit(FLG_PEND_REL, &l2->flag)) l2_disconnect(fi, event, NULL); if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) { pr = DL_ESTABLISH_CNF; } else if (l2->vs != l2->va) { skb_queue_purge(&l2->i_queue); pr = DL_ESTABLISH_IND; } stop_t200(l2, 5); l2->vr = 0; l2->vs = 0; l2->va = 0; l2->sow = 0; mISDN_FsmChangeState(fi, ST_L2_7); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4); if (pr != -1) l2up_create(l2, pr, 0, NULL); if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); if (l2->tm) l2_tei(l2, MDL_STATUS_UP_IND, 0); } static void l2_released(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlag(l2, skb)) { l2_mdl_error_ua(fi, event, arg); return; } dev_kfree_skb(skb); stop_t200(l2, 6); lapb_dl_release_l2l3(l2, DL_RELEASE_CNF); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_reestablish(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!get_PollFlagFree(l2, skb)) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); } } static void l2_st5_dm_release(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (get_PollFlagFree(l2, skb)) { stop_t200(l2, 7); if (!test_bit(FLG_L3_INIT, &l2->flag)) skb_queue_purge(&l2->i_queue); if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, 
MDL_STATUS_DOWN_IND, 0);	/* tail of l2_st5_dm_release (begins on a previous row) */
	}
}

/*
 * DM received while awaiting release (state 6): on a matching final bit
 * stop T200, signal DL_RELEASE_CNF to layer 3 and drop back to the
 * TEI-assigned state; notify TEI management if present.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 8);
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}

/*
 * Build a supervisory frame carrying the current V(R) and queue it for
 * transmission.
 * @typ: supervisory type (RR/RNR/REJ)
 * @cr:  CMD or RSP address coding
 * @pf:  poll/final bit
 */
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* modulo-128: two-octet control field, N(R) in octet 2 */
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		/* modulo-8: N(R), type and P/F packed into one octet */
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	skb = mI_alloc_skb(i, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING "isdnl2 can't alloc sbbuff for enquiry_cr\n");
		return;
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}

/* Answer a peer enquiry: RNR while we are receiver-busy, RR otherwise. */
inline void
enquiry_response(struct layer2 *l2)
{
	if (test_bit(FLG_OWN_BUSY, &l2->flag))
		enquiry_cr(l2, RNR, RSP, 1);
	else
		enquiry_cr(l2, RR, RSP, 1);
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}

/* Poll the peer with an RR/RNR command and (re)start T200. */
inline void
transmit_enquiry(struct layer2 *l2)
{
	if (test_bit(FLG_OWN_BUSY, &l2->flag))
		enquiry_cr(l2, RNR, CMD, 1);
	else
		enquiry_cr(l2, RR, CMD, 1);
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	start_t200(l2, 9);
}

/* N(R) sequence error: report error 'J' and re-establish the link. */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}

/*
 * Roll V(S) back to @nr and move every unacknowledged frame from the
 * transmit window (windowar[]) back onto the head of the i_queue, then
 * kick EV_L2_ACK_PULL so they are re-sent.
 */
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING "%s: windowar[%d] is NULL\n", __func__, p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}

static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, typ = RR; unsigned int nr; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, l2)) { set_peer_busy(l2); typ = RNR; } else clear_peer_busy(l2); if (IsREJ(skb->data, l2)) typ = REJ; if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (PollFlag) { if (rsp) l2mgr(l2, MDL_ERROR_IND, (void *) 'A'); else enquiry_response(l2); } if (legalnr(l2, nr)) { if (typ == REJ) { setva(l2, nr); invoke_retransmission(l2, nr); stop_t200(l2, 10); if (mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 6)) l2m_debug(&l2->l2m, "Restart T203 ST7 REJ"); } else if ((nr == l2->vs) && (typ == RR)) { setva(l2, nr); stop_t200(l2, 11); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 7); } else if ((l2->va != nr) || (typ == RNR)) { setva(l2, nr); if (typ != RR) mISDN_FsmDelTimer(&l2->t203, 9); restart_t200(l2, 12); } if (skb_queue_len(&l2->i_queue) && (typ == RR)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } else nrerrorrecovery(fi); } static void l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_bit(FLG_L3_INIT, &l2->flag)) skb_queue_tail(&l2->i_queue, skb); else dev_kfree_skb(skb); } static void l2_feed_i_pull(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->i_queue, skb); mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } static void l2_feed_iqueue(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_tail(&l2->i_queue, skb); } static void l2_got_iframe(struct FsmInst *fi, int event, void *arg) { struct layer2 
*l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, i; u_int ns, nr; i = l2addrsize(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = ((skb->data[i + 1] & 0x1) == 0x1); ns = skb->data[i] >> 1; nr = (skb->data[i + 1] >> 1) & 0x7f; } else { PollFlag = (skb->data[i] & 0x10); ns = (skb->data[i] >> 1) & 0x7; nr = (skb->data[i] >> 5) & 0x7; } if (test_bit(FLG_OWN_BUSY, &l2->flag)) { dev_kfree_skb(skb); if (PollFlag) enquiry_response(l2); } else { if (l2->vr == ns) { l2->vr++; if (test_bit(FLG_MOD128, &l2->flag)) l2->vr %= 128; else l2->vr %= 8; test_and_clear_bit(FLG_REJEXC, &l2->flag); if (PollFlag) enquiry_response(l2); else test_and_set_bit(FLG_ACK_PEND, &l2->flag); skb_pull(skb, l2headersize(l2, 0)); l2up(l2, DL_DATA_IND, skb); } else { /* n(s)!=v(r) */ dev_kfree_skb(skb); if (test_and_set_bit(FLG_REJEXC, &l2->flag)) { if (PollFlag) enquiry_response(l2); } else { enquiry_cr(l2, REJ, RSP, PollFlag); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } } } if (legalnr(l2, nr)) { if (!test_bit(FLG_PEER_BUSY, &l2->flag) && (fi->state == ST_L2_7)) { if (nr == l2->vs) { stop_t200(l2, 13); mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 7); } else if (nr != l2->va) restart_t200(l2, 14); } setva(l2, nr); } else { nrerrorrecovery(fi); return; } if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag)) enquiry_cr(l2, RR, RSP, 0); } static void l2_got_tei(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; u_int info; l2->tei = (signed char)(long)arg; set_channel_address(&l2->ch, l2->sapi, l2->tei); info = DL_INFO_L2_CONNECT; l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info); if (fi->state == ST_L2_3) { establishlink(fi); test_and_set_bit(FLG_L3_INIT, &l2->flag); } else mISDN_FsmChangeState(fi, ST_L2_4); if (skb_queue_len(&l2->ui_queue)) tx_ui(l2); } static void l2_st5_tout_200(struct FsmInst *fi, int event, void *arg) { struct layer2 
*l2 = fi->userdata;	/* completes the declaration begun on the previous row */

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: rearm T200 without consuming a retry */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* N200 retries exhausted: abandon establishment (error 'G') */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		/* retry: count it, restart T200 and resend SABM(E) */
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}

/*
 * T200 expiry while disconnecting (state 6): resend DISC up to N200
 * times, then give up with error 'H' and report the release to layer 3.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}

/*
 * T200 expiry in multi-frame established state (7): enter timer-recovery
 * state 8 and poll the peer with an enquiry.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc++;
}

/*
 * T200 expiry in timer-recovery state (8): re-poll until N200 retries
 * are used up, then re-establish (continues on the next row).
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
l2mgr(l2, MDL_ERROR_IND, (void *) 'I'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } else { transmit_enquiry(l2); l2->rc++; } } static void l2_st7_tout_203(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; if (test_bit(FLG_LAPD, &l2->flag) && test_bit(FLG_DCHAN_BUSY, &l2->flag)) { mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9); return; } mISDN_FsmChangeState(fi, ST_L2_8); transmit_enquiry(l2); l2->rc = 0; } static void l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb, *nskb, *oskb; u_char header[MAX_L2HEADER_LEN]; u_int i, p1; if (!cansend(l2)) return; skb = skb_dequeue(&l2->i_queue); if (!skb) return; if (test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; else p1 = (l2->vs - l2->va) % 8; p1 = (p1 + l2->sow) % l2->window; if (l2->windowar[p1]) { printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", p1); dev_kfree_skb(l2->windowar[p1]); } l2->windowar[p1] = skb; i = sethdraddr(l2, header, CMD); if (test_bit(FLG_MOD128, &l2->flag)) { header[i++] = l2->vs << 1; header[i++] = l2->vr << 1; l2->vs = (l2->vs + 1) % 128; } else { header[i++] = (l2->vr << 5) | (l2->vs << 1); l2->vs = (l2->vs + 1) % 8; } nskb = skb_clone(skb, GFP_ATOMIC); p1 = skb_headroom(nskb); if (p1 >= i) memcpy(skb_push(nskb, i), header, i); else { printk(KERN_WARNING "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); oskb = nskb; nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); if (!nskb) { dev_kfree_skb(oskb); printk(KERN_WARNING "%s: no skb mem\n", __func__); return; } memcpy(skb_put(nskb, i), header, i); memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len); dev_kfree_skb(oskb); } l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) { mISDN_FsmDelTimer(&l2->t203, 13); mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11); } } static void 
l2_st8_got_super(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; int PollFlag, rsp, rnr = 0; unsigned int nr; rsp = *skb->data & 0x2; if (test_bit(FLG_ORIG, &l2->flag)) rsp = !rsp; skb_pull(skb, l2addrsize(l2)); if (IsRNR(skb->data, l2)) { set_peer_busy(l2); rnr = 1; } else clear_peer_busy(l2); if (test_bit(FLG_MOD128, &l2->flag)) { PollFlag = (skb->data[1] & 0x1) == 0x1; nr = skb->data[1] >> 1; } else { PollFlag = (skb->data[0] & 0x10); nr = (skb->data[0] >> 5) & 0x7; } dev_kfree_skb(skb); if (rsp && PollFlag) { if (legalnr(l2, nr)) { if (rnr) { restart_t200(l2, 15); } else { stop_t200(l2, 16); mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 5); setva(l2, nr); } invoke_retransmission(l2, nr); mISDN_FsmChangeState(fi, ST_L2_7); if (skb_queue_len(&l2->i_queue) && cansend(l2)) mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL); } else nrerrorrecovery(fi); } else { if (!rsp && PollFlag) enquiry_response(l2); if (legalnr(l2, nr)) setva(l2, nr); else nrerrorrecovery(fi); } } static void l2_got_FRMR(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_pull(skb, l2addrsize(l2) + 1); if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */ (IsUA(skb->data) && (fi->state == ST_L2_7))) { l2mgr(l2, MDL_ERROR_IND, (void *) 'K'); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } dev_kfree_skb(skb); } static void l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; l2up_create(l2, DL_RELEASE_IND, 0, NULL); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; 
skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); l2->tei = GROUP_TEI; stop_t200(l2, 17); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->ui_queue); l2->tei = GROUP_TEI; stop_t200(l2, 18); l2up_create(l2, DL_RELEASE_IND, 0, NULL); mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_tei_remove(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); l2->tei = GROUP_TEI; stop_t200(l2, 17); mISDN_FsmDelTimer(&l2->t203, 19); l2up_create(l2, DL_RELEASE_IND, 0, NULL); /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST, * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED, * 0, NULL, 0); */ mISDN_FsmChangeState(fi, ST_L2_1); } static void l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag)) l2up(l2, DL_RELEASE_IND, skb); else dev_kfree_skb(skb); } static void l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); stop_t200(l2, 19); st5_dl_release_l2l3(l2); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); dev_kfree_skb(skb); } static void l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; skb_queue_purge(&l2->ui_queue); stop_t200(l2, 20); l2up(l2, DL_RELEASE_CNF, skb); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_persistent_da(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = 
arg; skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); freewin(l2); stop_t200(l2, 19); mISDN_FsmDelTimer(&l2->t203, 19); l2up(l2, DL_RELEASE_IND, skb); mISDN_FsmChangeState(fi, ST_L2_4); if (l2->tm) l2_tei(l2, MDL_STATUS_DOWN_IND, 0); } static void l2_set_own_busy(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) { enquiry_cr(l2, RNR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } if (skb) dev_kfree_skb(skb); } static void l2_clear_own_busy(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; struct sk_buff *skb = arg; if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) { enquiry_cr(l2, RR, RSP, 0); test_and_clear_bit(FLG_ACK_PEND, &l2->flag); } if (skb) dev_kfree_skb(skb); } static void l2_frame_error(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; l2mgr(l2, MDL_ERROR_IND, arg); } static void l2_frame_error_reest(struct FsmInst *fi, int event, void *arg) { struct layer2 *l2 = fi->userdata; l2mgr(l2, MDL_ERROR_IND, arg); establishlink(fi); test_and_clear_bit(FLG_L3_INIT, &l2->flag); } static struct FsmNode L2FnList[] = { {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign}, {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3}, {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish}, {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3}, {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish}, {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release}, {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel}, {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect}, {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest}, {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull}, {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue}, {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign}, {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui}, {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui}, {ST_L2_4, 
EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui}, {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei}, {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove}, {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove}, {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove}, {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove}, {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove}, {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove}, {ST_L2_4, EV_L2_SABME, l2_start_multi}, {ST_L2_5, EV_L2_SABME, l2_send_UA}, {ST_L2_6, EV_L2_SABME, l2_send_DM}, {ST_L2_7, EV_L2_SABME, l2_restart_multi}, {ST_L2_8, EV_L2_SABME, l2_restart_multi}, {ST_L2_4, EV_L2_DISC, l2_send_DM}, {ST_L2_5, EV_L2_DISC, l2_send_DM}, {ST_L2_6, EV_L2_DISC, l2_send_UA}, {ST_L2_7, EV_L2_DISC, l2_stop_multi}, {ST_L2_8, EV_L2_DISC, l2_stop_multi}, {ST_L2_4, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_5, EV_L2_UA, l2_connected}, {ST_L2_6, EV_L2_UA, l2_released}, {ST_L2_7, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_8, EV_L2_UA, l2_mdl_error_ua}, {ST_L2_4, EV_L2_DM, l2_reestablish}, {ST_L2_5, EV_L2_DM, l2_st5_dm_release}, {ST_L2_6, EV_L2_DM, l2_st6_dm_release}, {ST_L2_7, EV_L2_DM, l2_mdl_error_dm}, {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm}, {ST_L2_1, EV_L2_UI, l2_got_ui}, {ST_L2_2, EV_L2_UI, l2_got_ui}, {ST_L2_3, EV_L2_UI, l2_got_ui}, {ST_L2_4, EV_L2_UI, l2_got_ui}, {ST_L2_5, EV_L2_UI, l2_got_ui}, {ST_L2_6, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_UI, l2_got_ui}, {ST_L2_8, EV_L2_UI, l2_got_ui}, {ST_L2_7, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_8, EV_L2_FRMR, l2_got_FRMR}, {ST_L2_7, EV_L2_SUPER, l2_st7_got_super}, {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, {ST_L2_7, EV_L2_I, l2_got_iframe}, {ST_L2_8, EV_L2_I, l2_got_iframe}, {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, {ST_L2_7, EV_L2_T200, 
l2_st7_tout_200}, {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy}, {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da}, {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da}, {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da}, {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da}, {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da}, }; static int ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) { u_char *datap = skb->data; int ret = -EINVAL; int psapi, ptei; u_int l; int c = 0; l = l2addrsize(l2); if (skb->len <= l) { mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N'); return ret; } if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */ psapi = *datap++; ptei = *datap++; if ((psapi & 1) || !(ptei & 1)) { printk(KERN_WARNING "l2 D-channel frame wrong EA0/EA1\n"); return ret; } psapi >>= 2; ptei >>= 1; if (psapi != l2->sapi) { /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n", __func__, psapi, l2->sapi); dev_kfree_skb(skb); return 0; } if ((ptei != l2->tei) && (ptei != GROUP_TEI)) { /* not our business */ if (*debug & DEBUG_L2) printk(KERN_DEBUG "%s: tei %d/%d mismatch\n", __func__, ptei, l2->tei); dev_kfree_skb(skb); return 0; } } else datap += l; if (!(*datap & 1)) { /* I-Frame */ c = iframe_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, 
EV_L2_I, skb); } else if (IsSFrame(datap, l2)) { /* S-Frame */ c = super_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb); } else if (IsUI(datap)) { c = UI_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb); } else if (IsSABME(datap, l2)) { c = unnum_error(l2, skb, CMD); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb); } else if (IsUA(datap)) { c = unnum_error(l2, skb, RSP); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb); } else if (IsDISC(datap)) { c = unnum_error(l2, skb, CMD); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb); } else if (IsDM(datap)) { c = unnum_error(l2, skb, RSP); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb); } else if (IsFRMR(datap)) { c = FRMR_error(l2, skb); if (!c) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb); } else c = 'L'; if (c) { printk(KERN_WARNING "l2 D-channel frame error %c\n", c); mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); } return ret; } static int l2_send(struct mISDNchannel *ch, struct sk_buff *skb) { struct layer2 *l2 = container_of(ch, struct layer2, ch); struct mISDNhead *hh = mISDN_HEAD_P(skb); int ret = -EINVAL; if (*debug & DEBUG_L2_RECV) printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n", __func__, hh->prim, hh->id, l2->sapi, l2->tei); switch (hh->prim) { case PH_DATA_IND: ret = ph_data_indication(l2, hh, skb); break; case PH_DATA_CNF: ret = ph_data_confirm(l2, hh, skb); break; case PH_ACTIVATE_IND: test_and_set_bit(FLG_L1_ACTIV, &l2->flag); l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL); if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag)) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_ESTABLISH_REQ, skb); break; case PH_DEACTIVATE_IND: test_and_clear_bit(FLG_L1_ACTIV, &l2->flag); l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL); ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb); break; case MPH_INFORMATION_IND: if (!l2->up) break; ret = l2->up->send(l2->up, skb); break; case DL_DATA_REQ: ret = mISDN_FsmEvent(&l2->l2m, 
EV_L2_DL_DATA, skb); break; case DL_UNITDATA_REQ: ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb); break; case DL_ESTABLISH_REQ: if (test_bit(FLG_LAPB, &l2->flag)) test_and_set_bit(FLG_ORIG, &l2->flag); if (test_bit(FLG_L1_ACTIV, &l2->flag)) { if (test_bit(FLG_LAPD, &l2->flag) || test_bit(FLG_ORIG, &l2->flag)) ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_ESTABLISH_REQ, skb); } else { if (test_bit(FLG_LAPD, &l2->flag) || test_bit(FLG_ORIG, &l2->flag)) { test_and_set_bit(FLG_ESTAB_PEND, &l2->flag); } ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2), skb); } break; case DL_RELEASE_REQ: if (test_bit(FLG_LAPB, &l2->flag)) l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL); ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ, skb); break; default: if (*debug & DEBUG_L2) l2m_debug(&l2->l2m, "l2 unknown pr %04x", hh->prim); } if (ret) { dev_kfree_skb(skb); ret = 0; } return ret; } int tei_l2(struct layer2 *l2, u_int cmd, u_long arg) { int ret = -EINVAL; if (*debug & DEBUG_L2_TEI) printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); switch (cmd) { case (MDL_ASSIGN_REQ): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); break; case (MDL_REMOVE_REQ): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL); break; case (MDL_ERROR_IND): ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); break; case (MDL_ERROR_RSP): /* ETS 300-125 5.3.2.1 Test: TC13010 */ printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); break; } return ret; } static void release_l2(struct layer2 *l2) { mISDN_FsmDelTimer(&l2->t200, 21); mISDN_FsmDelTimer(&l2->t203, 16); skb_queue_purge(&l2->i_queue); skb_queue_purge(&l2->ui_queue); skb_queue_purge(&l2->down_queue); ReleaseWin(l2); if (test_bit(FLG_LAPD, &l2->flag)) { TEIrelease(l2); if (l2->ch.st) l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, CLOSE_CHANNEL, NULL); } kfree(l2); } static int l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct layer2 *l2 = container_of(ch, struct 
layer2, ch); u_int info; if (*debug & DEBUG_L2_CTRL) printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); switch (cmd) { case OPEN_CHANNEL: if (test_bit(FLG_LAPD, &l2->flag)) { set_channel_address(&l2->ch, l2->sapi, l2->tei); info = DL_INFO_L2_CONNECT; l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info); } break; case CLOSE_CHANNEL: if (l2->ch.peer) l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL); release_l2(l2); break; } return 0; } struct layer2 * create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, int sapi) { struct layer2 *l2; struct channel_req rq; l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL); if (!l2) { printk(KERN_ERR "kzalloc layer2 failed\n"); return NULL; } l2->next_id = 1; l2->down_id = MISDN_ID_NONE; l2->up = ch; l2->ch.st = ch->st; l2->ch.send = l2_send; l2->ch.ctrl = l2_ctrl; switch (protocol) { case ISDN_P_LAPD_NT: test_and_set_bit(FLG_LAPD, &l2->flag); test_and_set_bit(FLG_LAPD_NET, &l2->flag); test_and_set_bit(FLG_MOD128, &l2->flag); l2->sapi = sapi; l2->maxlen = MAX_DFRAME_LEN; if (test_bit(OPTION_L2_PMX, &options)) l2->window = 7; else l2->window = 1; if (test_bit(OPTION_L2_PTP, &options)) test_and_set_bit(FLG_PTP, &l2->flag); if (test_bit(OPTION_L2_FIXEDTEI, &options)) test_and_set_bit(FLG_FIXED_TEI, &l2->flag); l2->tei = tei; l2->T200 = 1000; l2->N200 = 3; l2->T203 = 10000; if (test_bit(OPTION_L2_PMX, &options)) rq.protocol = ISDN_P_NT_E1; else rq.protocol = ISDN_P_NT_S0; rq.adr.channel = 0; l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); break; case ISDN_P_LAPD_TE: test_and_set_bit(FLG_LAPD, &l2->flag); test_and_set_bit(FLG_MOD128, &l2->flag); test_and_set_bit(FLG_ORIG, &l2->flag); l2->sapi = sapi; l2->maxlen = MAX_DFRAME_LEN; if (test_bit(OPTION_L2_PMX, &options)) l2->window = 7; else l2->window = 1; if (test_bit(OPTION_L2_PTP, &options)) test_and_set_bit(FLG_PTP, &l2->flag); if (test_bit(OPTION_L2_FIXEDTEI, &options)) test_and_set_bit(FLG_FIXED_TEI, &l2->flag); l2->tei = tei; l2->T200 = 1000; 
l2->N200 = 3; l2->T203 = 10000; if (test_bit(OPTION_L2_PMX, &options)) rq.protocol = ISDN_P_TE_E1; else rq.protocol = ISDN_P_TE_S0; rq.adr.channel = 0; l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); break; case ISDN_P_B_X75SLP: test_and_set_bit(FLG_LAPB, &l2->flag); l2->window = 7; l2->maxlen = MAX_DATA_SIZE; l2->T200 = 1000; l2->N200 = 4; l2->T203 = 5000; l2->addr.A = 3; l2->addr.B = 1; break; default: printk(KERN_ERR "layer2 create failed prt %x\n", protocol); kfree(l2); return NULL; } skb_queue_head_init(&l2->i_queue); skb_queue_head_init(&l2->ui_queue); skb_queue_head_init(&l2->down_queue); skb_queue_head_init(&l2->tmp_queue); InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || test_bit(FLG_PTP, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else l2->l2m.state = ST_L2_1; l2->l2m.debug = *debug; l2->l2m.userdata = l2; l2->l2m.userint = 0; l2->l2m.printdebug = l2m_debug; mISDN_FsmInitTimer(&l2->l2m, &l2->t200); mISDN_FsmInitTimer(&l2->l2m, &l2->t203); return l2; } static int x75create(struct channel_req *crq) { struct layer2 *l2; if (crq->protocol != ISDN_P_B_X75SLP) return -EPROTONOSUPPORT; l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0); if (!l2) return -ENOMEM; crq->ch = &l2->ch; crq->protocol = ISDN_P_B_HDLC; return 0; } static struct Bprotocol X75SLP = { .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)), .name = "X75SLP", .create = x75create }; int Isdnl2_Init(u_int *deb) { debug = deb; mISDN_register_Bprotocol(&X75SLP); l2fsm.state_count = L2_STATE_COUNT; l2fsm.event_count = L2_EVENT_COUNT; l2fsm.strEvent = strL2Event; l2fsm.strState = strL2State; mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); TEIInit(deb); return 0; } void Isdnl2_cleanup(void) { mISDN_unregister_Bprotocol(&X75SLP); TEIFree(); mISDN_FsmFree(&l2fsm); }
gpl-2.0
joyfish/android_kernel_huawei_msm8928
drivers/net/ethernet/emulex/benet/be_ethtool.c
4806
22196
/* * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ #include "be.h" #include "be_cmds.h" #include <linux/ethtool.h> struct be_ethtool_stat { char desc[ETH_GSTRING_LEN]; int type; int size; int offset; }; enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ offsetof(_struct, field) #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\ FIELDINFO(struct be_tx_stats, field) #define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\ FIELDINFO(struct be_rx_stats, field) #define DRVSTAT_INFO(field) #field, DRVSTAT,\ FIELDINFO(struct be_drv_stats, field) static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(rx_crc_errors)}, {DRVSTAT_INFO(rx_alignment_symbol_errors)}, {DRVSTAT_INFO(rx_pause_frames)}, {DRVSTAT_INFO(rx_control_frames)}, /* Received packets dropped when the Ethernet length field * is not equal to the actual Ethernet data length. */ {DRVSTAT_INFO(rx_in_range_errors)}, /* Received packets dropped when their length field is >= 1501 bytes * and <= 1535 bytes. */ {DRVSTAT_INFO(rx_out_range_errors)}, /* Received packets dropped when they are longer than 9216 bytes */ {DRVSTAT_INFO(rx_frame_too_long)}, /* Received packets dropped when they don't pass the unicast or * multicast address filtering. */ {DRVSTAT_INFO(rx_address_mismatch_drops)}, /* Received packets dropped when IP packet length field is less than * the IP header length field. */ {DRVSTAT_INFO(rx_dropped_too_small)}, /* Received packets dropped when IP length field is greater than * the actual packet length. 
*/ {DRVSTAT_INFO(rx_dropped_too_short)}, /* Received packets dropped when the IP header length field is less * than 5. */ {DRVSTAT_INFO(rx_dropped_header_too_small)}, /* Received packets dropped when the TCP header length field is less * than 5 or the TCP header length + IP header length is more * than IP packet length. */ {DRVSTAT_INFO(rx_dropped_tcp_length)}, {DRVSTAT_INFO(rx_dropped_runt)}, /* Number of received packets dropped when a fifo for descriptors going * into the packet demux block overflows. In normal operation, this * fifo must never overflow. */ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, {DRVSTAT_INFO(rx_ip_checksum_errs)}, {DRVSTAT_INFO(rx_tcp_checksum_errs)}, {DRVSTAT_INFO(rx_udp_checksum_errs)}, {DRVSTAT_INFO(tx_pauseframes)}, {DRVSTAT_INFO(tx_controlframes)}, {DRVSTAT_INFO(rx_priority_pause_frames)}, /* Received packets dropped when an internal fifo going into * main packet buffer tank (PMEM) overflows. */ {DRVSTAT_INFO(pmem_fifo_overflow_drop)}, {DRVSTAT_INFO(jabber_events)}, /* Received packets dropped due to lack of available HW packet buffers * used to temporarily hold the received packets. */ {DRVSTAT_INFO(rx_drops_no_pbuf)}, /* Received packets dropped due to input receive buffer * descriptor fifo overflowing. */ {DRVSTAT_INFO(rx_drops_no_erx_descr)}, /* Packets dropped because the internal FIFO to the offloaded TCP * receive processing block is full. This could happen only for * offloaded iSCSI or FCoE trarffic. */ {DRVSTAT_INFO(rx_drops_no_tpre_descr)}, /* Received packets dropped when they need more than 8 * receive buffers. This cannot happen as the driver configures * 2048 byte receive buffers. 
*/ {DRVSTAT_INFO(rx_drops_too_many_frags)}, {DRVSTAT_INFO(forwarded_packets)}, /* Received packets dropped when the frame length * is more than 9018 bytes */ {DRVSTAT_INFO(rx_drops_mtu)}, /* Number of packets dropped due to random early drop function */ {DRVSTAT_INFO(eth_red_drops)}, {DRVSTAT_INFO(be_on_die_temperature)} }; #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts * are first and second members respectively. */ static const struct be_ethtool_stat et_rx_stats[] = { {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_compl)}, {DRVSTAT_RX_INFO(rx_mcast_pkts)}, /* Number of page allocation failures while posting receive buffers * to HW. */ {DRVSTAT_RX_INFO(rx_post_fail)}, /* Recevied packets dropped due to skb allocation failure */ {DRVSTAT_RX_INFO(rx_drops_no_skbs)}, /* Received packets dropped due to lack of available fetched buffers * posted by the driver. */ {DRVSTAT_RX_INFO(rx_drops_no_frags)} }; #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) /* Stats related to multi TX queues: get_stats routine assumes compl is the * first member */ static const struct be_ethtool_stat et_tx_stats[] = { {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */ {DRVSTAT_TX_INFO(tx_bytes)}, {DRVSTAT_TX_INFO(tx_pkts)}, /* Number of skbs queued for trasmission by the driver */ {DRVSTAT_TX_INFO(tx_reqs)}, /* Number of TX work request blocks DMAed to HW */ {DRVSTAT_TX_INFO(tx_wrbs)}, /* Number of times the TX queue was stopped due to lack * of spaces in the TXQ. 
*/ {DRVSTAT_TX_INFO(tx_stops)} }; #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) static const char et_self_tests[][ETH_GSTRING_LEN] = { "MAC Loopback test", "PHY Loopback test", "External Loopback test", "DDR DMA test", "Link test" }; #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) #define BE_MAC_LOOPBACK 0x0 #define BE_PHY_LOOPBACK 0x1 #define BE_ONE_PORT_EXT_LOOPBACK 0x2 #define BE_NO_LOOPBACK 0xff static void be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct be_adapter *adapter = netdev_priv(netdev); char fw_on_flash[FW_VER_LEN]; memset(fw_on_flash, 0 , sizeof(fw_on_flash)); be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) { strcat(drvinfo->fw_version, " ["); strcat(drvinfo->fw_version, fw_on_flash); strcat(drvinfo->fw_version, "]"); } strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) { u32 data_read = 0, eof; u8 addn_status; struct be_dma_mem data_len_cmd; int status; memset(&data_len_cmd, 0, sizeof(data_len_cmd)); /* data_offset and data_size should be 0 to get reg len */ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name, &data_read, &eof, &addn_status); return data_read; } static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, u32 buf_len, void *buf) { struct be_dma_mem read_cmd; u32 read_len = 0, total_read_len = 0, chunk_size; u32 eof = 0; u8 addn_status; int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, &read_cmd.dma); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, 
"Memory allocation failure while reading dump\n"); return -ENOMEM; } while ((total_read_len < buf_len) && !eof) { chunk_size = min_t(u32, (buf_len - total_read_len), LANCER_READ_FILE_CHUNK); chunk_size = ALIGN(chunk_size, 4); status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, total_read_len, file_name, &read_len, &eof, &addn_status); if (!status) { memcpy(buf + total_read_len, read_cmd.va, read_len); total_read_len += read_len; eof &= LANCER_READ_FILE_EOF_MASK; } else { status = -EIO; break; } } pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, read_cmd.dma); return status; } static int be_get_reg_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); u32 log_size = 0; if (be_physfn(adapter)) { if (lancer_chip(adapter)) log_size = lancer_cmd_get_file_len(adapter, LANCER_FW_DUMP_FILE); else be_cmd_get_reg_len(adapter, &log_size); } return log_size; } static void be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) { struct be_adapter *adapter = netdev_priv(netdev); if (be_physfn(adapter)) { memset(buf, 0, regs->len); if (lancer_chip(adapter)) lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, regs->len, buf); else be_cmd_get_regs(adapter, regs->len, buf); } } static int be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *eqo = &adapter->eq_obj[0]; et->rx_coalesce_usecs = eqo->cur_eqd; et->rx_coalesce_usecs_high = eqo->max_eqd; et->rx_coalesce_usecs_low = eqo->min_eqd; et->tx_coalesce_usecs = eqo->cur_eqd; et->tx_coalesce_usecs_high = eqo->max_eqd; et->tx_coalesce_usecs_low = eqo->min_eqd; et->use_adaptive_rx_coalesce = eqo->enable_aic; et->use_adaptive_tx_coalesce = eqo->enable_aic; return 0; } /* TX attributes are ignored. Only RX attributes are considered * eqd cmd is issued in the worker thread. 
*/ static int be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *eqo; int i; for_all_evt_queues(adapter, eqo, i) { eqo->enable_aic = et->use_adaptive_rx_coalesce; eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); eqo->eqd = et->rx_coalesce_usecs; } return 0; } static void be_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) { struct be_adapter *adapter = netdev_priv(netdev); struct be_rx_obj *rxo; struct be_tx_obj *txo; void *p; unsigned int i, j, base = 0, start; for (i = 0; i < ETHTOOL_STATS_NUM; i++) { p = (u8 *)&adapter->drv_stats + et_stats[i].offset; data[i] = *(u32 *)p; } base += ETHTOOL_STATS_NUM; for_all_rx_queues(adapter, rxo, j) { struct be_rx_stats *stats = rx_stats(rxo); do { start = u64_stats_fetch_begin_bh(&stats->sync); data[base] = stats->rx_bytes; data[base + 1] = stats->rx_pkts; } while (u64_stats_fetch_retry_bh(&stats->sync, start)); for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { p = (u8 *)stats + et_rx_stats[i].offset; data[base + i] = *(u32 *)p; } base += ETHTOOL_RXSTATS_NUM; } for_all_tx_queues(adapter, txo, j) { struct be_tx_stats *stats = tx_stats(txo); do { start = u64_stats_fetch_begin_bh(&stats->sync_compl); data[base] = stats->tx_compl; } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); do { start = u64_stats_fetch_begin_bh(&stats->sync); for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { p = (u8 *)stats + et_tx_stats[i].offset; data[base + i] = (et_tx_stats[i].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } } while (u64_stats_fetch_retry_bh(&stats->sync, start)); base += ETHTOOL_TXSTATS_NUM; } } static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) { struct be_adapter *adapter = netdev_priv(netdev); int i, j; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ETHTOOL_STATS_NUM; i++) { memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_rx_qs; i++) { for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) { sprintf(data, "rxq%d: %s", i, et_rx_stats[j].desc); data += ETH_GSTRING_LEN; } } for (i = 0; i < adapter->num_tx_qs; i++) { for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) { sprintf(data, "txq%d: %s", i, et_tx_stats[j].desc); data += ETH_GSTRING_LEN; } } break; case ETH_SS_TEST: for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { memcpy(data, et_self_tests[i], ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } break; } } static int be_get_sset_count(struct net_device *netdev, int stringset) { struct be_adapter *adapter = netdev_priv(netdev); switch (stringset) { case ETH_SS_TEST: return ETHTOOL_TESTS_NUM; case ETH_SS_STATS: return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; default: return -EINVAL; } } static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); struct be_phy_info phy_info; u8 mac_speed = 0; u16 link_speed = 0; u8 link_status; int status; if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { status = be_cmd_link_status_query(adapter, &mac_speed, &link_speed, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); /* link_speed is in units of 10 Mbps */ if (link_speed) { ethtool_cmd_speed_set(ecmd, link_speed*10); } else { switch (mac_speed) { case PHY_LINK_SPEED_10MBPS: ethtool_cmd_speed_set(ecmd, SPEED_10); break; case PHY_LINK_SPEED_100MBPS: ethtool_cmd_speed_set(ecmd, SPEED_100); break; case 
PHY_LINK_SPEED_1GBPS: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; case PHY_LINK_SPEED_10GBPS: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; case PHY_LINK_SPEED_ZERO: ethtool_cmd_speed_set(ecmd, 0); break; } } status = be_cmd_get_phy_info(adapter, &phy_info); if (!status) { switch (phy_info.interface_type) { case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: case PHY_TYPE_SFP_PLUS_10GB: ecmd->port = PORT_FIBRE; break; default: ecmd->port = PORT_TP; break; } switch (phy_info.interface_type) { case PHY_TYPE_KR_10GB: case PHY_TYPE_KX4_10GB: ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_INTERNAL; break; default: ecmd->autoneg = AUTONEG_DISABLE; ecmd->transceiver = XCVR_EXTERNAL; break; } } /* Save for future use */ adapter->link_speed = ethtool_cmd_speed(ecmd); adapter->port_type = ecmd->port; adapter->transceiver = ecmd->transceiver; adapter->autoneg = ecmd->autoneg; } else { ethtool_cmd_speed_set(ecmd, adapter->link_speed); ecmd->port = adapter->port_type; ecmd->transceiver = adapter->transceiver; ecmd->autoneg = adapter->autoneg; } ecmd->duplex = DUPLEX_FULL; ecmd->phy_address = adapter->port_num; switch (ecmd->port) { case PORT_FIBRE: ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); break; case PORT_TP: ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); break; case PORT_AUI: ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI); break; } if (ecmd->autoneg) { ecmd->supported |= SUPPORTED_1000baseT_Full; ecmd->supported |= SUPPORTED_Autoneg; ecmd->advertising |= (ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full); } return 0; } static void be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct be_adapter *adapter = netdev_priv(netdev); ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; } static void be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) { struct be_adapter 
*adapter = netdev_priv(netdev); be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); ecmd->autoneg = 0; } static int be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); int status; if (ecmd->autoneg != 0) return -EINVAL; adapter->tx_fc = ecmd->tx_pause; adapter->rx_fc = ecmd->rx_pause; status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); return status; } static int be_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct be_adapter *adapter = netdev_priv(netdev); switch (state) { case ETHTOOL_ID_ACTIVE: be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &adapter->beacon_state); return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_ENABLED); break; case ETHTOOL_ID_OFF: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_DISABLED); break; case ETHTOOL_ID_INACTIVE: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, adapter->beacon_state); } return 0; } static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); if (be_is_wol_supported(adapter)) { wol->supported |= WAKE_MAGIC; wol->wolopts |= WAKE_MAGIC; } else wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; if (!be_is_wol_supported(adapter)) { dev_warn(&adapter->pdev->dev, "WOL not supported\n"); return -EOPNOTSUPP; } if (wol->wolopts & WAKE_MAGIC) adapter->wol = true; else adapter->wol = false; return 0; } static int be_test_ddr_dma(struct be_adapter *adapter) { int ret, i; struct be_dma_mem ddrdma_cmd; static const 
u64 pattern[2] = { 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, &ddrdma_cmd.dma, GFP_KERNEL); if (!ddrdma_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } for (i = 0; i < 2; i++) { ret = be_cmd_ddr_dma_test(adapter, pattern[i], 4096, &ddrdma_cmd); if (ret != 0) goto err; } err: dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va, ddrdma_cmd.dma); return ret; } static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); return *status; } static void be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { struct be_adapter *adapter = netdev_priv(netdev); u8 mac_speed = 0; u16 qos_link_speed = 0; memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); if (test->flags & ETH_TEST_FL_OFFLINE) { if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) { test->flags |= ETH_TEST_FL_FAILED; } if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0) { test->flags |= ETH_TEST_FL_FAILED; } if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, &data[2]) != 0) { test->flags |= ETH_TEST_FL_FAILED; } } if (be_test_ddr_dma(adapter) != 0) { data[3] = 1; test->flags |= ETH_TEST_FL_FAILED; } if (be_cmd_link_status_query(adapter, &mac_speed, &qos_link_speed, NULL, 0) != 0) { test->flags |= ETH_TEST_FL_FAILED; data[4] = -1; } else if (!mac_speed) { test->flags |= ETH_TEST_FL_FAILED; data[4] = 1; } } static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) { struct be_adapter *adapter = netdev_priv(netdev); return be_load_fw(adapter, efl->data); } static int be_get_eeprom_len(struct 
net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); if (lancer_chip(adapter)) { if (be_physfn(adapter)) return lancer_cmd_get_file_len(adapter, LANCER_VPD_PF_FILE); else return lancer_cmd_get_file_len(adapter, LANCER_VPD_VF_FILE); } else { return BE_READ_SEEPROM_LEN; } } static int be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, uint8_t *data) { struct be_adapter *adapter = netdev_priv(netdev); struct be_dma_mem eeprom_cmd; struct be_cmd_resp_seeprom_read *resp; int status; if (!eeprom->len) return -EINVAL; if (lancer_chip(adapter)) { if (be_physfn(adapter)) return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, eeprom->len, data); else return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, eeprom->len, data); } eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, &eeprom_cmd.dma, GFP_KERNEL); if (!eeprom_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure. 
Could not read eeprom\n"); return -ENOMEM; } status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); if (!status) { resp = eeprom_cmd.va; memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); } dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va, eeprom_cmd.dma); return status; } const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, .get_wol = be_get_wol, .set_wol = be_set_wol, .get_link = ethtool_op_get_link, .get_eeprom_len = be_get_eeprom_len, .get_eeprom = be_read_eeprom, .get_coalesce = be_get_coalesce, .set_coalesce = be_set_coalesce, .get_ringparam = be_get_ringparam, .get_pauseparam = be_get_pauseparam, .set_pauseparam = be_set_pauseparam, .get_strings = be_get_stat_strings, .set_phys_id = be_set_phys_id, .get_sset_count = be_get_sset_count, .get_ethtool_stats = be_get_ethtool_stats, .get_regs_len = be_get_reg_len, .get_regs = be_get_regs, .flash_device = be_do_flash, .self_test = be_self_test, };
gpl-2.0
fedya/android_kernel_msm8974
drivers/net/wireless/wl12xx/testmode.c
4806
7414
/* * This file is part of wl1271 * * Copyright (C) 2010 Nokia Corporation * * Contact: Luciano Coelho <luciano.coelho@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include "testmode.h" #include <linux/slab.h> #include <net/genetlink.h> #include "wl12xx.h" #include "debug.h" #include "acx.h" #include "reg.h" #include "ps.h" #include "io.h" #define WL1271_TM_MAX_DATA_LENGTH 1024 enum wl1271_tm_commands { WL1271_TM_CMD_UNSPEC, WL1271_TM_CMD_TEST, WL1271_TM_CMD_INTERROGATE, WL1271_TM_CMD_CONFIGURE, WL1271_TM_CMD_NVS_PUSH, /* Not in use. 
Keep to not break ABI */ WL1271_TM_CMD_SET_PLT_MODE, WL1271_TM_CMD_RECOVER, WL1271_TM_CMD_GET_MAC, __WL1271_TM_CMD_AFTER_LAST }; #define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1) enum wl1271_tm_attrs { WL1271_TM_ATTR_UNSPEC, WL1271_TM_ATTR_CMD_ID, WL1271_TM_ATTR_ANSWER, WL1271_TM_ATTR_DATA, WL1271_TM_ATTR_IE_ID, WL1271_TM_ATTR_PLT_MODE, __WL1271_TM_ATTR_AFTER_LAST }; #define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1) static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = { [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 }, [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 }, [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY, .len = WL1271_TM_MAX_DATA_LENGTH }, [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 }, [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 }, }; static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) { int buf_len, ret, len; struct sk_buff *skb; void *buf; u8 answer = 0; wl1271_debug(DEBUG_TESTMODE, "testmode cmd test"); if (!tb[WL1271_TM_ATTR_DATA]) return -EINVAL; buf = nla_data(tb[WL1271_TM_ATTR_DATA]); buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); if (tb[WL1271_TM_ATTR_ANSWER]) answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]); if (buf_len > sizeof(struct wl1271_command)) return -EMSGSIZE; mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { ret = -EINVAL; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; ret = wl1271_cmd_test(wl, buf, buf_len, answer); if (ret < 0) { wl1271_warning("testmode cmd test failed: %d", ret); goto out_sleep; } if (answer) { len = nla_total_size(buf_len); skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); if (!skb) { ret = -ENOMEM; goto out_sleep; } NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out_sleep; } out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; nla_put_failure: kfree_skb(skb); ret = -EMSGSIZE; goto out_sleep; } static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, 
struct nlattr *tb[]) { int ret; struct wl1271_command *cmd; struct sk_buff *skb; u8 ie_id; wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate"); if (!tb[WL1271_TM_ATTR_IE_ID]) return -EINVAL; ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { ret = -EINVAL; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { ret = -ENOMEM; goto out_sleep; } ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd)); if (ret < 0) { wl1271_warning("testmode cmd interrogate failed: %d", ret); goto out_free; } skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); if (!skb) { ret = -ENOMEM; goto out_free; } NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out_free; out_free: kfree(cmd); out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; nla_put_failure: kfree_skb(skb); ret = -EMSGSIZE; goto out_free; } static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) { int buf_len, ret; void *buf; u8 ie_id; wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure"); if (!tb[WL1271_TM_ATTR_DATA]) return -EINVAL; if (!tb[WL1271_TM_ATTR_IE_ID]) return -EINVAL; ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); buf = nla_data(tb[WL1271_TM_ATTR_DATA]); buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); if (buf_len > sizeof(struct wl1271_command)) return -EMSGSIZE; mutex_lock(&wl->mutex); ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len); mutex_unlock(&wl->mutex); if (ret < 0) { wl1271_warning("testmode cmd configure failed: %d", ret); return ret; } return 0; } static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) { u32 val; int ret; wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode"); if (!tb[WL1271_TM_ATTR_PLT_MODE]) return -EINVAL; val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]); switch (val) { case 0: ret = wl1271_plt_stop(wl); break; 
case 1: ret = wl1271_plt_start(wl); break; default: ret = -EINVAL; break; } return ret; } static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[]) { wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover"); wl12xx_queue_recovery_work(wl); return 0; } static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[]) { struct sk_buff *skb; u8 mac_addr[ETH_ALEN]; int ret = 0; mutex_lock(&wl->mutex); if (!wl->plt) { ret = -EINVAL; goto out; } if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { ret = -EOPNOTSUPP; goto out; } mac_addr[0] = (u8)(wl->fuse_oui_addr >> 16); mac_addr[1] = (u8)(wl->fuse_oui_addr >> 8); mac_addr[2] = (u8) wl->fuse_oui_addr; mac_addr[3] = (u8)(wl->fuse_nic_addr >> 16); mac_addr[4] = (u8)(wl->fuse_nic_addr >> 8); mac_addr[5] = (u8) wl->fuse_nic_addr; skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN); if (!skb) { ret = -ENOMEM; goto out; } NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr); ret = cfg80211_testmode_reply(skb); if (ret < 0) goto out; out: mutex_unlock(&wl->mutex); return ret; nla_put_failure: kfree_skb(skb); ret = -EMSGSIZE; goto out; } int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) { struct wl1271 *wl = hw->priv; struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; int err; err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); if (err) return err; if (!tb[WL1271_TM_ATTR_CMD_ID]) return -EINVAL; switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) { case WL1271_TM_CMD_TEST: return wl1271_tm_cmd_test(wl, tb); case WL1271_TM_CMD_INTERROGATE: return wl1271_tm_cmd_interrogate(wl, tb); case WL1271_TM_CMD_CONFIGURE: return wl1271_tm_cmd_configure(wl, tb); case WL1271_TM_CMD_SET_PLT_MODE: return wl1271_tm_cmd_set_plt_mode(wl, tb); case WL1271_TM_CMD_RECOVER: return wl1271_tm_cmd_recover(wl, tb); case WL1271_TM_CMD_GET_MAC: return wl12xx_tm_cmd_get_mac(wl, tb); default: return -EOPNOTSUPP; } }
gpl-2.0
zarboz/dlxpul_43
drivers/edac/i5400_edac.c
4806
40522
/* * Intel 5400 class Memory Controllers kernel module (Seaburg) * * This file may be distributed under the terms of the * GNU General Public License. * * Copyright (c) 2008 by: * Ben Woodard <woodard@redhat.com> * Mauro Carvalho Chehab <mchehab@redhat.com> * * Red Hat Inc. http://www.redhat.com * * Forked and adapted from the i5000_edac driver which was * written by Douglas Thompson Linux Networx <norsk5@xmission.com> * * This module is based on the following document: * * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet * http://developer.intel.com/design/chipsets/datashts/313070.htm * */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/edac.h> #include <linux/mmzone.h> #include "edac_core.h" /* * Alter this version for the I5400 module when modifications are made */ #define I5400_REVISION " Ver: 1.0.0" #define EDAC_MOD_STR "i5400_edac" #define i5400_printk(level, fmt, arg...) \ edac_printk(level, "i5400", fmt, ##arg) #define i5400_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) /* Limits for i5400 */ #define NUM_MTRS_PER_BRANCH 4 #define CHANNELS_PER_BRANCH 2 #define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH #define MAX_CHANNELS 4 /* max possible csrows per channel */ #define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL) /* Device 16, * Function 0: System Address * Function 1: Memory Branch Map, Control, Errors Register * Function 2: FSB Error Registers * * All 3 functions of Device 16 (0,1,2) share the SAME DID and * uses PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2), * PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1 * for device 21 (0,1). 
*/ /* OFFSETS for Function 0 */ #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ #define MAXCH 0x56 /* Max Channel Number */ #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ /* OFFSETS for Function 1 */ #define TOLM 0x6C #define REDMEMB 0x7C #define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */ #define MIR0 0x80 #define MIR1 0x84 #define AMIR0 0x8c #define AMIR1 0x90 /* Fatal error registers */ #define FERR_FAT_FBD 0x98 /* also called as FERR_FAT_FB_DIMM at datasheet */ #define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */ #define NERR_FAT_FBD 0x9c #define FERR_NF_FBD 0xa0 /* also called as FERR_NFAT_FB_DIMM at datasheet */ /* Non-fatal error register */ #define NERR_NF_FBD 0xa4 /* Enable error mask */ #define EMASK_FBD 0xa8 #define ERR0_FBD 0xac #define ERR1_FBD 0xb0 #define ERR2_FBD 0xb4 #define MCERR_FBD 0xb8 /* No OFFSETS for Device 16 Function 2 */ /* * Device 21, * Function 0: Memory Map Branch 0 * * Device 22, * Function 0: Memory Map Branch 1 */ /* OFFSETS for Function 0 */ #define AMBPRESENT_0 0x64 #define AMBPRESENT_1 0x66 #define MTR0 0x80 #define MTR1 0x82 #define MTR2 0x84 #define MTR3 0x86 /* OFFSETS for Function 1 */ #define NRECFGLOG 0x74 #define RECFGLOG 0x78 #define NRECMEMA 0xbe #define NRECMEMB 0xc0 #define NRECFB_DIMMA 0xc4 #define NRECFB_DIMMB 0xc8 #define NRECFB_DIMMC 0xcc #define NRECFB_DIMMD 0xd0 #define NRECFB_DIMME 0xd4 #define NRECFB_DIMMF 0xd8 #define REDMEMA 0xdC #define RECMEMA 0xf0 #define RECMEMB 0xf4 #define RECFB_DIMMA 0xf8 #define RECFB_DIMMB 0xec #define RECFB_DIMMC 0xf0 #define RECFB_DIMMD 0xf4 #define RECFB_DIMME 0xf8 #define RECFB_DIMMF 0xfC /* * Error indicator bits and masks * Error masks are according with Table 5-17 of i5400 datasheet */ enum error_mask { EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */ EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */ EMASK_M3 = 1<<2, /* Reserved */ 
EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */ EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */ EMASK_M6 = 1<<5, /* Unsupported on i5400 */ EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */ EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */ EMASK_M10 = 1<<9, /* Unsupported on i5400 */ EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */ EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */ EMASK_M13 = 1<<12, /* Memory Write error on first attempt */ EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */ EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */ EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */ EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */ EMASK_M18 = 1<<17, /* Unsupported on i5400 */ EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */ EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */ EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */ EMASK_M22 = 1<<21, /* SPD protocol Error */ EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */ EMASK_M24 = 1<<23, /* Refresh error */ EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */ EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */ EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */ EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */ EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */ }; /* * Names to translate bit error into something useful */ static const char *error_name[] = { [0] = "Memory Write error on non-redundant retry", [1] = "Memory or FB-DIMM configuration CRC read error", /* Reserved */ [3] = "Uncorrectable Data ECC on Replay", [4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", /* M6 Unsupported on i5400 */ [6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data 
ECC", [7] = "Aliased Uncorrectable Patrol Data ECC", [8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", /* M10 Unsupported on i5400 */ [10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", [11] = "Non-Aliased Uncorrectable Patrol Data ECC", [12] = "Memory Write error on first attempt", [13] = "FB-DIMM Configuration Write error on first attempt", [14] = "Memory or FB-DIMM configuration CRC read error", [15] = "Channel Failed-Over Occurred", [16] = "Correctable Non-Mirrored Demand Data ECC", /* M18 Unsupported on i5400 */ [18] = "Correctable Resilver- or Spare-Copy Data ECC", [19] = "Correctable Patrol Data ECC", [20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status", [21] = "SPD protocol Error", [22] = "Non-Redundant Fast Reset Timeout", [23] = "Refresh error", [24] = "Memory Write error on redundant retry", [25] = "Redundant Fast Reset Timeout", [26] = "Correctable Counter Threshold Exceeded", [27] = "DIMM-Spare Copy Completed", [28] = "DIMM-Isolation Completed", }; /* Fatal errors */ #define ERROR_FAT_MASK (EMASK_M1 | \ EMASK_M2 | \ EMASK_M23) /* Correctable errors */ #define ERROR_NF_CORRECTABLE (EMASK_M27 | \ EMASK_M20 | \ EMASK_M19 | \ EMASK_M18 | \ EMASK_M17 | \ EMASK_M16) #define ERROR_NF_DIMM_SPARE (EMASK_M29 | \ EMASK_M28) #define ERROR_NF_SPD_PROTOCOL (EMASK_M22) #define ERROR_NF_NORTH_CRC (EMASK_M21) /* Recoverable errors */ #define ERROR_NF_RECOVERABLE (EMASK_M26 | \ EMASK_M25 | \ EMASK_M24 | \ EMASK_M15 | \ EMASK_M14 | \ EMASK_M13 | \ EMASK_M12 | \ EMASK_M11 | \ EMASK_M9 | \ EMASK_M8 | \ EMASK_M7 | \ EMASK_M5) /* uncorrectable errors */ #define ERROR_NF_UNCORRECTABLE (EMASK_M4) /* mask to all non-fatal errors */ #define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \ ERROR_NF_UNCORRECTABLE | \ ERROR_NF_RECOVERABLE | \ ERROR_NF_DIMM_SPARE | \ ERROR_NF_SPD_PROTOCOL | \ ERROR_NF_NORTH_CRC) /* * Define error masks for the several registers */ /* Enable all fatal and non fatal errors */ #define ENABLE_EMASK_ALL (ERROR_FAT_MASK | 
ERROR_NF_MASK)

/* mask for fatal error registers */
#define FERR_FAT_MASK ERROR_FAT_MASK

/* masks for non-fatal error register */

/*
 * Map an EMASK-layout bit mask onto the FERR_NF_FBD register layout:
 * bit 28 (EMASK_M29) stays in place, all lower event bits sit three
 * bit positions lower in the hardware register.
 */
static inline int to_nf_mask(unsigned int mask)
{
	return (mask & EMASK_M29) | (mask >> 3);
};

/*
 * Intended inverse of to_nf_mask(): recover the EMASK-layout mask from
 * a FERR_NF_FBD register value.
 *
 * NOTE(review): '<<' binds tighter than '&', so the second term parses
 * as (mask & (((1 << 28) - 1) << 3)), i.e. it keeps bits 3..30 in
 * place instead of shifting bits 0..27 up by three as the trailing
 * comment implies.  Verify against to_nf_mask() and the chipset
 * datasheet before relying on the low event bits here.
 */
static inline int from_nf_ferr(unsigned int mask)
{
	return (mask & EMASK_M29) |		/* Bit 28 */
	       (mask & ((1 << 28) - 1) << 3);	/* Bits 0 to 27 */
};

#define FERR_NF_MASK		to_nf_mask(ERROR_NF_MASK)
#define FERR_NF_CORRECTABLE	to_nf_mask(ERROR_NF_CORRECTABLE)
#define FERR_NF_DIMM_SPARE	to_nf_mask(ERROR_NF_DIMM_SPARE)
#define FERR_NF_SPD_PROTOCOL	to_nf_mask(ERROR_NF_SPD_PROTOCOL)
#define FERR_NF_NORTH_CRC	to_nf_mask(ERROR_NF_NORTH_CRC)
#define FERR_NF_RECOVERABLE	to_nf_mask(ERROR_NF_RECOVERABLE)
#define FERR_NF_UNCORRECTABLE	to_nf_mask(ERROR_NF_UNCORRECTABLE)

/* Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 10))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 9))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 8)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr)		(((mtr) >> 5) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ?
2 : 1)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)

/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
/* Extract the FBD channel index from bits 31:28 of an error register. */
static inline int extract_fbdchan_indx(u32 x)
{
	return (x>>28) & 0x3;
}

#ifdef CONFIG_EDAC_DEBUG
/* MTR NUMROW - human-readable decode of the 2-bit row-count field */
static const char *numrow_toString[] = {
	"8,192 - 13 rows",
	"16,384 - 14 rows",
	"32,768 - 15 rows",
	"65,536 - 16 rows"
};

/* MTR NUMCOL - human-readable decode of the 2-bit column-count field */
static const char *numcol_toString[] = {
	"1,024 - 10 columns",
	"2,048 - 11 columns",
	"4,096 - 12 columns",
	"reserved"
};
#endif

/* Device name and register DID (Device ID) */
struct i5400_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i5400_dev_info i5400_devs[] = {
	{
		.ctl_name = "I5400",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
	},
};

struct i5400_dimm_info {
	int megabytes;	/* size, 0 means not present */
};

/* driver private data structure */
struct i5400_pvt {
	struct pci_dev *system_address;		/* 16.0 */
	struct pci_dev *branchmap_werrors;	/* 16.1 */
	struct pci_dev *fsb_error_regs;		/* 16.2 */
	struct pci_dev *branch_0;		/* 21.0 */
	struct pci_dev *branch_1;		/* 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u16 mir0, mir1;

	u16 b0_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
	u16 b0_ambpresent0;			/* Branch 0, Channel 0 */
	u16 b0_ambpresent1;			/* Branch 0, Channel 1 */

	u16 b1_mtr[NUM_MTRS_PER_BRANCH];	/* Memory Technology Reg */
	u16 b1_ambpresent0;			/* Branch 1, Channel 0 */
	u16 b1_ambpresent1;			/* Branch 1, Channel 1 */

	/* DIMM information matrix, allocating architecture maximums */
	struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];

	/* Actual values for this controller */
	int maxch;				/* Max channels */
	int maxdimmperch;			/* Max DIMMs per channel */
}; /* I5400 MCH error information retrieved from Hardware */ struct i5400_error_info { /* These registers are always read from the MC */ u32 ferr_fat_fbd; /* First Errors Fatal */ u32 nerr_fat_fbd; /* Next Errors Fatal */ u32 ferr_nf_fbd; /* First Errors Non-Fatal */ u32 nerr_nf_fbd; /* Next Errors Non-Fatal */ /* These registers are input ONLY if there was a Recoverable Error */ u32 redmemb; /* Recoverable Mem Data Error log B */ u16 recmema; /* Recoverable Mem Error log A */ u32 recmemb; /* Recoverable Mem Error log B */ /* These registers are input ONLY if there was a Non-Rec Error */ u16 nrecmema; /* Non-Recoverable Mem log A */ u16 nrecmemb; /* Non-Recoverable Mem log B */ }; /* note that nrec_rdwr changed from NRECMEMA to NRECMEMB between the 5000 and 5400 better to use an inline function than a macro in this case */ static inline int nrec_bank(struct i5400_error_info *info) { return ((info->nrecmema) >> 12) & 0x7; } static inline int nrec_rank(struct i5400_error_info *info) { return ((info->nrecmema) >> 8) & 0xf; } static inline int nrec_buf_id(struct i5400_error_info *info) { return ((info->nrecmema)) & 0xff; } static inline int nrec_rdwr(struct i5400_error_info *info) { return (info->nrecmemb) >> 31; } /* This applies to both NREC and REC string so it can be used with nrec_rdwr and rec_rdwr */ static inline const char *rdwr_str(int rdwr) { return rdwr ? 
"Write" : "Read"; } static inline int nrec_cas(struct i5400_error_info *info) { return ((info->nrecmemb) >> 16) & 0x1fff; } static inline int nrec_ras(struct i5400_error_info *info) { return (info->nrecmemb) & 0xffff; } static inline int rec_bank(struct i5400_error_info *info) { return ((info->recmema) >> 12) & 0x7; } static inline int rec_rank(struct i5400_error_info *info) { return ((info->recmema) >> 8) & 0xf; } static inline int rec_rdwr(struct i5400_error_info *info) { return (info->recmemb) >> 31; } static inline int rec_cas(struct i5400_error_info *info) { return ((info->recmemb) >> 16) & 0x1fff; } static inline int rec_ras(struct i5400_error_info *info) { return (info->recmemb) & 0xffff; } static struct edac_pci_ctl_info *i5400_pci; /* * i5400_get_error_info Retrieve the hardware error information from * the hardware and cache it in the 'info' * structure */ static void i5400_get_error_info(struct mem_ctl_info *mci, struct i5400_error_info *info) { struct i5400_pvt *pvt; u32 value; pvt = mci->pvt_info; /* read in the 1st FATAL error register */ pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value); /* Mask only the bits that the doc says are valid */ value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK); /* If there is an error, then read in the NEXT FATAL error register and the Memory Error Log Register A */ if (value & FERR_FAT_MASK) { info->ferr_fat_fbd = value; /* harvest the various error data we need */ pci_read_config_dword(pvt->branchmap_werrors, NERR_FAT_FBD, &info->nerr_fat_fbd); pci_read_config_word(pvt->branchmap_werrors, NRECMEMA, &info->nrecmema); pci_read_config_word(pvt->branchmap_werrors, NRECMEMB, &info->nrecmemb); /* Clear the error bits, by writing them back */ pci_write_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, value); } else { info->ferr_fat_fbd = 0; info->nerr_fat_fbd = 0; info->nrecmema = 0; info->nrecmemb = 0; } /* read in the 1st NON-FATAL error register */ pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, 
&value); /* If there is an error, then read in the 1st NON-FATAL error * register as well */ if (value & FERR_NF_MASK) { info->ferr_nf_fbd = value; /* harvest the various error data we need */ pci_read_config_dword(pvt->branchmap_werrors, NERR_NF_FBD, &info->nerr_nf_fbd); pci_read_config_word(pvt->branchmap_werrors, RECMEMA, &info->recmema); pci_read_config_dword(pvt->branchmap_werrors, RECMEMB, &info->recmemb); pci_read_config_dword(pvt->branchmap_werrors, REDMEMB, &info->redmemb); /* Clear the error bits, by writing them back */ pci_write_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, value); } else { info->ferr_nf_fbd = 0; info->nerr_nf_fbd = 0; info->recmema = 0; info->recmemb = 0; info->redmemb = 0; } } /* * i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, * struct i5400_error_info *info, * int handle_errors); * * handle the Intel FATAL and unrecoverable errors, if any */ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, struct i5400_error_info *info, unsigned long allErrors) { char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; int branch; int channel; int bank; int buf_id; int rank; int rdwr; int ras, cas; int errnum; char *type = NULL; if (!allErrors) return; /* if no error, return now */ if (allErrors & ERROR_FAT_MASK) type = "FATAL"; else if (allErrors & FERR_NF_UNCORRECTABLE) type = "NON-FATAL uncorrected"; else type = "NON-FATAL recoverable"; /* ONLY ONE of the possible error bits will be set, as per the docs */ branch = extract_fbdchan_indx(info->ferr_fat_fbd); channel = branch; /* Use the NON-Recoverable macros to extract data */ bank = nrec_bank(info); rank = nrec_rank(info); buf_id = nrec_buf_id(info); rdwr = nrec_rdwr(info); ras = nrec_ras(info); cas = nrec_cas(info); debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", rank, channel, channel + 1, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas); /* Only 1 bit will be on */ errnum = 
find_first_bit(&allErrors, ARRAY_SIZE(error_name)); /* Form out message */ snprintf(msg, sizeof(msg), "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s " "RAS=%d CAS=%d %s Err=0x%lx (%s))", type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas, type, allErrors, error_name[errnum]); /* Call the helper to output message */ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); } /* * i5400_process_fatal_error_info(struct mem_ctl_info *mci, * struct i5400_error_info *info, * int handle_errors); * * handle the Intel NON-FATAL errors, if any */ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, struct i5400_error_info *info) { char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80]; unsigned long allErrors; int branch; int channel; int bank; int rank; int rdwr; int ras, cas; int errnum; /* mask off the Error bits that are possible */ allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK); if (!allErrors) return; /* if no error, return now */ /* ONLY ONE of the possible error bits will be set, as per the docs */ if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) { i5400_proccess_non_recoverable_info(mci, info, allErrors); return; } /* Correctable errors */ if (allErrors & ERROR_NF_CORRECTABLE) { debugf0("\tCorrected bits= 0x%lx\n", allErrors); branch = extract_fbdchan_indx(info->ferr_nf_fbd); channel = 0; if (REC_ECC_LOCATOR_ODD(info->redmemb)) channel = 1; /* Convert channel to be based from zero, instead of * from branch base of 0 */ channel += branch; bank = rec_bank(info); rank = rec_rank(info); rdwr = rec_rdwr(info); ras = rec_ras(info); cas = rec_cas(info); /* Only 1 bit will be on */ errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); debugf0("\t\tCSROW= %d Channel= %d (Branch %d " "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", rank, channel, branch >> 1, bank, rdwr_str(rdwr), ras, cas); /* Form out message */ snprintf(msg, sizeof(msg), "Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s " "RAS=%d CAS=%d, CE Err=0x%lx 
(%s))", branch >> 1, bank, rdwr_str(rdwr), ras, cas, allErrors, error_name[errnum]); /* Call the helper to output message */ edac_mc_handle_fbd_ce(mci, rank, channel, msg); return; } /* Miscellaneous errors */ errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); branch = extract_fbdchan_indx(info->ferr_nf_fbd); i5400_mc_printk(mci, KERN_EMERG, "Non-Fatal misc error (Branch=%d Err=%#lx (%s))", branch >> 1, allErrors, error_name[errnum]); } /* * i5400_process_error_info Process the error info that is * in the 'info' structure, previously retrieved from hardware */ static void i5400_process_error_info(struct mem_ctl_info *mci, struct i5400_error_info *info) { u32 allErrors; /* First handle any fatal errors that occurred */ allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK); i5400_proccess_non_recoverable_info(mci, info, allErrors); /* now handle any non-fatal errors that occurred */ i5400_process_nonfatal_error_info(mci, info); } /* * i5400_clear_error Retrieve any error from the hardware * but do NOT process that error. * Used for 'clearing' out of previous errors * Called by the Core module. */ static void i5400_clear_error(struct mem_ctl_info *mci) { struct i5400_error_info info; i5400_get_error_info(mci, &info); } /* * i5400_check_error Retrieve and process errors reported by the * hardware. Called by the Core module. 
*/ static void i5400_check_error(struct mem_ctl_info *mci) { struct i5400_error_info info; debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i5400_get_error_info(mci, &info); i5400_process_error_info(mci, &info); } /* * i5400_put_devices 'put' all the devices that we have * reserved via 'get' */ static void i5400_put_devices(struct mem_ctl_info *mci) { struct i5400_pvt *pvt; pvt = mci->pvt_info; /* Decrement usage count for devices */ pci_dev_put(pvt->branch_1); pci_dev_put(pvt->branch_0); pci_dev_put(pvt->fsb_error_regs); pci_dev_put(pvt->branchmap_werrors); } /* * i5400_get_devices Find and perform 'get' operation on the MCH's * device/functions we want to reference for this driver * * Need to 'get' device 16 func 1 and func 2 */ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) { struct i5400_pvt *pvt; struct pci_dev *pdev; pvt = mci->pvt_info; pvt->branchmap_werrors = NULL; pvt->fsb_error_regs = NULL; pvt->branch_0 = NULL; pvt->branch_1 = NULL; /* Attempt to 'get' the MCH register we want */ pdev = NULL; while (1) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR, pdev); if (!pdev) { /* End of list, leave */ i5400_printk(KERN_ERR, "'system address,Process Bus' " "device not found:" "vendor 0x%x device 0x%x ERR func 1 " "(broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR); return -ENODEV; } /* Store device 16 func 1 */ if (PCI_FUNC(pdev->devfn) == 1) break; } pvt->branchmap_werrors = pdev; pdev = NULL; while (1) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR, pdev); if (!pdev) { /* End of list, leave */ i5400_printk(KERN_ERR, "'system address,Process Bus' " "device not found:" "vendor 0x%x device 0x%x ERR func 2 " "(broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR); pci_dev_put(pvt->branchmap_werrors); return -ENODEV; } /* Store device 16 func 2 */ if (PCI_FUNC(pdev->devfn) == 2) break; } pvt->fsb_error_regs = pdev; debugf1("System Address, 
processor bus- PCI Bus ID: %s %x:%x\n", pci_name(pvt->system_address), pvt->system_address->vendor, pvt->system_address->device); debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", pci_name(pvt->branchmap_werrors), pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", pci_name(pvt->fsb_error_regs), pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); if (!pvt->branch_0) { i5400_printk(KERN_ERR, "MC: 'BRANCH 0' device not found:" "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0); pci_dev_put(pvt->fsb_error_regs); pci_dev_put(pvt->branchmap_werrors); return -ENODEV; } /* If this device claims to have more than 2 channels then * fetch Branch 1's information */ if (pvt->maxch < CHANNELS_PER_BRANCH) return 0; pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD1, NULL); if (!pvt->branch_1) { i5400_printk(KERN_ERR, "MC: 'BRANCH 1' device not found:" "vendor 0x%x device 0x%x Func 0 " "(broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD1); pci_dev_put(pvt->branch_0); pci_dev_put(pvt->fsb_error_regs); pci_dev_put(pvt->branchmap_werrors); return -ENODEV; } return 0; } /* * determine_amb_present * * the information is contained in NUM_MTRS_PER_BRANCH different * registers determining which of the NUM_MTRS_PER_BRANCH requires * knowing which channel is in question * * 2 branches, each with 2 channels * b0_ambpresent0 for channel '0' * b0_ambpresent1 for channel '1' * b1_ambpresent0 for channel '2' * b1_ambpresent1 for channel '3' */ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel) { int amb_present; if (channel < CHANNELS_PER_BRANCH) { if (channel & 0x1) amb_present = pvt->b0_ambpresent1; else amb_present = pvt->b0_ambpresent0; } else { if (channel & 0x1) amb_present = 
pvt->b1_ambpresent1; else amb_present = pvt->b1_ambpresent0; } return amb_present; } /* * determine_mtr(pvt, csrow, channel) * * return the proper MTR register as determine by the csrow and desired channel */ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) { int mtr; int n; /* There is one MTR for each slot pair of FB-DIMMs, Each slot pair may be at branch 0 or branch 1. */ n = csrow; if (n >= NUM_MTRS_PER_BRANCH) { debugf0("ERROR: trying to access an invalid csrow: %d\n", csrow); return 0; } if (channel < CHANNELS_PER_BRANCH) mtr = pvt->b0_mtr[n]; else mtr = pvt->b1_mtr[n]; return mtr; } /* */ static void decode_mtr(int slot_row, u16 mtr) { int ans; ans = MTR_DIMMS_PRESENT(mtr); debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, ans ? "Present" : "NOT Present"); if (!ans) return; debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); debugf2("\t\tELECTRICAL THROTTLING is %s\n", MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? 
"double" : "single"); debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); } static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, struct i5400_dimm_info *dinfo) { int mtr; int amb_present_reg; int addrBits; mtr = determine_mtr(pvt, csrow, channel); if (MTR_DIMMS_PRESENT(mtr)) { amb_present_reg = determine_amb_present_reg(pvt, channel); /* Determine if there is a DIMM present in this DIMM slot */ if (amb_present_reg & (1 << csrow)) { /* Start with the number of bits for a Bank * on the DRAM */ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); /* Add thenumber of ROW bits */ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); /* add the number of COLUMN bits */ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); /* add the number of RANK bits */ addrBits += MTR_DIMM_RANK(mtr); addrBits += 6; /* add 64 bits per DIMM */ addrBits -= 20; /* divide by 2^^20 */ addrBits -= 3; /* 8 bits per bytes */ dinfo->megabytes = 1 << addrBits; } } } /* * calculate_dimm_size * * also will output a DIMM matrix map, if debug is enabled, for viewing * how the DIMMs are populated */ static void calculate_dimm_size(struct i5400_pvt *pvt) { struct i5400_dimm_info *dinfo; int csrow, max_csrows; char *p, *mem_buffer; int space, n; int channel; /* ================= Generate some debug output ================= */ space = PAGE_SIZE; mem_buffer = p = kmalloc(space, GFP_KERNEL); if (p == NULL) { i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n", __FILE__, __func__); return; } /* Scan all the actual CSROWS * and calculate the information for each DIMM * Start with the highest csrow first, to display it first * and work toward the 0th csrow */ max_csrows = pvt->maxdimmperch; for (csrow = max_csrows - 1; csrow >= 0; csrow--) { /* on an odd csrow, first output a 'boundary' marker, * then reset the message buffer */ if (csrow & 0x1) { n = snprintf(p, space, "---------------------------" "--------------------------------"); p 
+= n;
			space -= n;
			debugf2("%s\n", mem_buffer);
			p = mem_buffer;
			space = PAGE_SIZE;
		}

		/* label the csrow, then append one size cell per channel */
		n = snprintf(p, space, "csrow %2d ", csrow);
		p += n;
		space -= n;

		for (channel = 0; channel < pvt->maxch; channel++) {
			dinfo = &pvt->dimm_info[csrow][channel];
			handle_channel(pvt, csrow, channel, dinfo);
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}
		debugf2("%s\n", mem_buffer);
		p = mem_buffer;
		space = PAGE_SIZE;
	}

	/* Output the last bottom 'boundary' marker */
	n = snprintf(p, space, "---------------------------"
		"--------------------------------");
	p += n;
	space -= n;
	debugf2("%s\n", mem_buffer);
	p = mem_buffer;
	space = PAGE_SIZE;

	/* now output the 'channel' labels */
	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < pvt->maxch; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}

	/* output the last message and free buffer */
	debugf2("%s\n", mem_buffer);
	kfree(mem_buffer);
}

/*
 * i5400_get_mc_regs	read in the necessary registers and
 *			cache locally
 *
 *			Fills in the private data members
 */
static void i5400_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i5400_pvt *pvt;
	u32 actual_tolm;
	u16 limit;
	int slot_row;
	int maxch;
	int maxdimmperch;
	int way0, way1;

	pvt = mci->pvt_info;

	/* Read the 64-bit AMB BAR in two 32-bit halves. */
	pci_read_config_dword(pvt->system_address, AMBASE,
			(u32 *) &pvt->ambase);
	/*
	 * NOTE(review): the destination pointer for the upper half is
	 * ((u32 *)&pvt->ambase) + sizeof(u32), i.e. u32-pointer
	 * arithmetic advancing four *elements* (16 bytes) past
	 * &pvt->ambase rather than the intended + 1 (one u32, 4 bytes).
	 * This stores the upper dword past ambase into neighbouring pvt
	 * fields; confirm and change to + 1.
	 */
	pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
			((u32 *) &pvt->ambase) + sizeof(u32));

	maxdimmperch = pvt->maxdimmperch;
	maxch = pvt->maxch;

	debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
		(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
		pvt->tolm);

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); /* Get the MIR[0-1] regs */ limit = (pvt->mir0 >> 4) & 0x0fff; way0 = pvt->mir0 & 0x1; way1 = pvt->mir0 & 0x2; debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); limit = (pvt->mir1 >> 4) & 0xfff; way0 = pvt->mir1 & 0x1; way1 = pvt->mir1 & 0x2; debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); /* Get the set of MTR[0-3] regs by each branch */ for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { int where = MTR0 + (slot_row * sizeof(u16)); /* Branch 0 set of MTR registers */ pci_read_config_word(pvt->branch_0, where, &pvt->b0_mtr[slot_row]); debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, pvt->b0_mtr[slot_row]); if (pvt->maxch < CHANNELS_PER_BRANCH) { pvt->b1_mtr[slot_row] = 0; continue; } /* Branch 1 set of MTR registers */ pci_read_config_word(pvt->branch_1, where, &pvt->b1_mtr[slot_row]); debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, pvt->b1_mtr[slot_row]); } /* Read and dump branch 0's MTRs */ debugf2("\nMemory Technology Registers:\n"); debugf2(" Branch 0:\n"); for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) decode_mtr(slot_row, pvt->b0_mtr[slot_row]); pci_read_config_word(pvt->branch_0, AMBPRESENT_0, &pvt->b0_ambpresent0); debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); pci_read_config_word(pvt->branch_0, AMBPRESENT_1, &pvt->b0_ambpresent1); debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); /* Only if we have 2 branchs (4 channels) */ if (pvt->maxch < CHANNELS_PER_BRANCH) { pvt->b1_ambpresent0 = 0; pvt->b1_ambpresent1 = 0; } else { /* Read and dump branch 1's MTRs */ debugf2(" Branch 1:\n"); for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) decode_mtr(slot_row, pvt->b1_mtr[slot_row]); pci_read_config_word(pvt->branch_1, AMBPRESENT_0, &pvt->b1_ambpresent0); debugf2("\t\tAMB-Branch 1-present0 
0x%x:\n", pvt->b1_ambpresent0); pci_read_config_word(pvt->branch_1, AMBPRESENT_1, &pvt->b1_ambpresent1); debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", pvt->b1_ambpresent1); } /* Go and determine the size of each DIMM and place in an * orderly matrix */ calculate_dimm_size(pvt); } /* * i5400_init_csrows Initialize the 'csrows' table within * the mci control structure with the * addressing of memory. * * return: * 0 success * 1 no actual memory found on this MC */ static int i5400_init_csrows(struct mem_ctl_info *mci) { struct i5400_pvt *pvt; struct csrow_info *p_csrow; int empty, channel_count; int max_csrows; int mtr; int csrow_megs; int channel; int csrow; pvt = mci->pvt_info; channel_count = pvt->maxch; max_csrows = pvt->maxdimmperch; empty = 1; /* Assume NO memory */ for (csrow = 0; csrow < max_csrows; csrow++) { p_csrow = &mci->csrows[csrow]; p_csrow->csrow_idx = csrow; /* use branch 0 for the basis */ mtr = determine_mtr(pvt, csrow, 0); /* if no DIMMS on this row, continue */ if (!MTR_DIMMS_PRESENT(mtr)) continue; /* FAKE OUT VALUES, FIXME */ p_csrow->first_page = 0 + csrow * 20; p_csrow->last_page = 9 + csrow * 20; p_csrow->page_mask = 0xFFF; p_csrow->grain = 8; csrow_megs = 0; for (channel = 0; channel < pvt->maxch; channel++) csrow_megs += pvt->dimm_info[csrow][channel].megabytes; p_csrow->nr_pages = csrow_megs << 8; /* Assume DDR2 for now */ p_csrow->mtype = MEM_FB_DDR2; /* ask what device type on this row */ if (MTR_DRAM_WIDTH(mtr)) p_csrow->dtype = DEV_X8; else p_csrow->dtype = DEV_X4; p_csrow->edac_mode = EDAC_S8ECD8ED; empty = 0; } return empty; } /* * i5400_enable_error_reporting * Turn on the memory reporting features of the hardware */ static void i5400_enable_error_reporting(struct mem_ctl_info *mci) { struct i5400_pvt *pvt; u32 fbd_error_mask; pvt = mci->pvt_info; /* Read the FBD Error Mask Register */ pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD, &fbd_error_mask); /* Enable with a '0' */ fbd_error_mask &= ~(ENABLE_EMASK_ALL); 
pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD, fbd_error_mask); } /* * i5400_probe1 Probe for ONE instance of device to see if it is * present. * return: * 0 for FOUND a device * < 0 for error code */ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) { struct mem_ctl_info *mci; struct i5400_pvt *pvt; int num_channels; int num_dimms_per_channel; int num_csrows; if (dev_idx >= ARRAY_SIZE(i5400_devs)) return -EINVAL; debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", __FILE__, __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* We only are looking for func 0 of the set */ if (PCI_FUNC(pdev->devfn) != 0) return -ENODEV; /* As we don't have a motherboard identification routine to determine * actual number of slots/dimms per channel, we thus utilize the * resource as specified by the chipset. Thus, we might have * have more DIMMs per channel than actually on the mobo, but this * allows the driver to support up to the chipset max, without * some fancy mobo determination. 
*/ num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; num_channels = MAX_CHANNELS; num_csrows = num_dimms_per_channel; debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", __func__, num_channels, num_dimms_per_channel, num_csrows); /* allocate a new MC control structure */ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); if (mci == NULL) return -ENOMEM; debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); mci->dev = &pdev->dev; /* record ptr to the generic device */ pvt = mci->pvt_info; pvt->system_address = pdev; /* Record this device in our private */ pvt->maxch = num_channels; pvt->maxdimmperch = num_dimms_per_channel; /* 'get' the pci devices we want to reserve for our use */ if (i5400_get_devices(mci, dev_idx)) goto fail0; /* Time to get serious */ i5400_get_mc_regs(mci); /* retrieve the hardware registers */ mci->mc_idx = 0; mci->mtype_cap = MEM_FLAG_FB_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i5400_edac.c"; mci->mod_ver = I5400_REVISION; mci->ctl_name = i5400_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; /* Set the function pointer to an actual operation function */ mci->edac_check = i5400_check_error; /* initialize the MC control structure 'csrows' table * with the mapping and control information */ if (i5400_init_csrows(mci)) { debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" " because i5400_init_csrows() returned nonzero " "value\n"); mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ } else { debugf1("MC: Enable error reporting now\n"); i5400_enable_error_reporting(mci); } /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", __FILE__, __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ goto fail1; } i5400_clear_error(mci); /* allocating generic PCI control info */ i5400_pci = 
edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!i5400_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } return 0; /* Error exit unwinding stack */ fail1: i5400_put_devices(mci); fail0: edac_mc_free(mci); return -ENODEV; } /* * i5400_init_one constructor for one instance of device * * returns: * negative on error * count (>= 0) */ static int __devinit i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rc; debugf0("MC: %s: %s()\n", __FILE__, __func__); /* wake up device */ rc = pci_enable_device(pdev); if (rc) return rc; /* now probe and enable the device */ return i5400_probe1(pdev, id->driver_data); } /* * i5400_remove_one destructor for one instance of device * */ static void __devexit i5400_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; debugf0("%s: %s()\n", __FILE__, __func__); if (i5400_pci) edac_pci_release_generic_ctl(i5400_pci); mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; /* retrieve references to resources, and free those resources */ i5400_put_devices(mci); edac_mc_free(mci); } /* * pci_device_id table for which devices we are looking for * * The "E500P" device is the first device supported. */ static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, {0,} /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i5400_pci_tbl); /* * i5400_driver pci_driver structure for this module * */ static struct pci_driver i5400_driver = { .name = "i5400_edac", .probe = i5400_init_one, .remove = __devexit_p(i5400_remove_one), .id_table = i5400_pci_tbl, }; /* * i5400_init Module entry function * Try to initialize this module for its devices */ static int __init i5400_init(void) { int pci_rc; debugf2("MC: %s: %s()\n", __FILE__, __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i5400_driver); return (pci_rc < 0) ? pci_rc : 0; } /* * i5400_exit() Module exit function * Unregister the driver */ static void __exit i5400_exit(void) { debugf2("MC: %s: %s()\n", __FILE__, __func__); pci_unregister_driver(&i5400_driver); } module_init(i5400_init); module_exit(i5400_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - " I5400_REVISION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
BoostPop/kernel_lge_hammerhead
drivers/spi/spi-dw-pci.c
4806
4143
/*
 * PCI interface driver for DW SPI Core
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/module.h>

#include "spi-dw.h"

#define DRIVER_NAME "dw_spi_pci"

/* Per-device state: the owning PCI device plus the DW SPI core handle. */
struct dw_spi_pci {
	struct pci_dev	*pdev;
	struct dw_spi	dws;
};

/*
 * Probe one DW SPI PCI function: enable the device, map BAR 0 for the
 * core registers, then hand off to the common DW SPI layer.  Error
 * paths unwind in reverse acquisition order via the goto chain below.
 */
static int __devinit spi_pci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct dw_spi_pci *dwpci;
	struct dw_spi *dws;
	int pci_bar = 0;
	int ret;

	printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
		pdev->vendor, pdev->device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
	if (!dwpci) {
		ret = -ENOMEM;
		goto err_disable;
	}

	dwpci->pdev = pdev;
	dws = &dwpci->dws;

	/* Get basic io resource and map it */
	dws->paddr = pci_resource_start(pdev, pci_bar);
	dws->iolen = pci_resource_len(pdev, pci_bar);

	ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
	if (ret)
		goto err_kfree;

	dws->regs = ioremap_nocache((unsigned long)dws->paddr,
				pci_resource_len(pdev, pci_bar));
	if (!dws->regs) {
		ret = -ENOMEM;
		goto err_release_reg;
	}

	dws->parent_dev = &pdev->dev;
	dws->bus_num = 0;
	dws->num_cs = 4;
	dws->irq = pdev->irq;

	/*
	 * Specific handling for Intel MID platforms, like dma setup,
	 * clock rate, FIFO depth.
*/ if (pdev->device == 0x0800) { ret = dw_spi_mid_init(dws); if (ret) goto err_unmap; } ret = dw_spi_add_host(dws); if (ret) goto err_unmap; /* PCI hook and SPI hook use the same drv data */ pci_set_drvdata(pdev, dwpci); return 0; err_unmap: iounmap(dws->regs); err_release_reg: pci_release_region(pdev, pci_bar); err_kfree: kfree(dwpci); err_disable: pci_disable_device(pdev); return ret; } static void __devexit spi_pci_remove(struct pci_dev *pdev) { struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); dw_spi_remove_host(&dwpci->dws); iounmap(dwpci->dws.regs); pci_release_region(pdev, 0); kfree(dwpci); pci_disable_device(pdev); } #ifdef CONFIG_PM static int spi_suspend(struct pci_dev *pdev, pm_message_t state) { struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); int ret; ret = dw_spi_suspend_host(&dwpci->dws); if (ret) return ret; pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return ret; } static int spi_resume(struct pci_dev *pdev) { struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); int ret; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) return ret; return dw_spi_resume_host(&dwpci->dws); } #else #define spi_suspend NULL #define spi_resume NULL #endif static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { /* Intel MID platform SPI controller 0 */ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, {}, }; static struct pci_driver dw_spi_driver = { .name = DRIVER_NAME, .id_table = pci_ids, .probe = spi_pci_probe, .remove = __devexit_p(spi_pci_remove), .suspend = spi_suspend, .resume = spi_resume, }; static int __init mrst_spi_init(void) { return pci_register_driver(&dw_spi_driver); } static void __exit mrst_spi_exit(void) { pci_unregister_driver(&dw_spi_driver); } module_init(mrst_spi_init); module_exit(mrst_spi_exit); MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); MODULE_DESCRIPTION("PCI interface driver for DW SPI Core"); MODULE_LICENSE("GPL 
v2");
gpl-2.0
Kra1o5/android_kernel_huawei_msm8x25-common
drivers/spi/spi-tegra.c
5062
16018
/* * Driver for Nvidia TEGRA spi controller. * * Copyright (C) 2010 Google, Inc. * * Author: * Erik Gilling <konkers@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <mach/dma.h> #define SLINK_COMMAND 0x000 #define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) #define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) #define SLINK_BOTH_EN (1 << 10) #define SLINK_CS_SW (1 << 11) #define SLINK_CS_VALUE (1 << 12) #define SLINK_CS_POLARITY (1 << 13) #define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) #define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) #define SLINK_IDLE_SDA_PULL_LOW (2 << 16) #define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) #define SLINK_IDLE_SDA_MASK (3 << 16) #define SLINK_CS_POLARITY1 (1 << 20) #define SLINK_CK_SDA (1 << 21) #define SLINK_CS_POLARITY2 (1 << 22) #define SLINK_CS_POLARITY3 (1 << 23) #define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) #define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) #define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) #define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) #define SLINK_IDLE_SCLK_MASK (3 << 24) #define SLINK_M_S (1 << 28) #define SLINK_WAIT (1 << 29) #define SLINK_GO (1 << 30) #define SLINK_ENB (1 << 31) #define SLINK_COMMAND2 0x004 #define SLINK_LSBFE (1 << 0) #define SLINK_SSOE (1 << 1) #define SLINK_SPIE (1 << 4) #define SLINK_BIDIROE (1 << 
6) #define SLINK_MODFEN (1 << 7) #define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) #define SLINK_CS_ACTIVE_BETWEEN (1 << 17) #define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) #define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) #define SLINK_FIFO_REFILLS_0 (0 << 22) #define SLINK_FIFO_REFILLS_1 (1 << 22) #define SLINK_FIFO_REFILLS_2 (2 << 22) #define SLINK_FIFO_REFILLS_3 (3 << 22) #define SLINK_FIFO_REFILLS_MASK (3 << 22) #define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) #define SLINK_SPC0 (1 << 29) #define SLINK_TXEN (1 << 30) #define SLINK_RXEN (1 << 31) #define SLINK_STATUS 0x008 #define SLINK_COUNT(val) (((val) >> 0) & 0x1f) #define SLINK_WORD(val) (((val) >> 5) & 0x1f) #define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) #define SLINK_MODF (1 << 16) #define SLINK_RX_UNF (1 << 18) #define SLINK_TX_OVF (1 << 19) #define SLINK_TX_FULL (1 << 20) #define SLINK_TX_EMPTY (1 << 21) #define SLINK_RX_FULL (1 << 22) #define SLINK_RX_EMPTY (1 << 23) #define SLINK_TX_UNF (1 << 24) #define SLINK_RX_OVF (1 << 25) #define SLINK_TX_FLUSH (1 << 26) #define SLINK_RX_FLUSH (1 << 27) #define SLINK_SCLK (1 << 28) #define SLINK_ERR (1 << 29) #define SLINK_RDY (1 << 30) #define SLINK_BSY (1 << 31) #define SLINK_MAS_DATA 0x010 #define SLINK_SLAVE_DATA 0x014 #define SLINK_DMA_CTL 0x018 #define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) #define SLINK_TX_TRIG_1 (0 << 16) #define SLINK_TX_TRIG_4 (1 << 16) #define SLINK_TX_TRIG_8 (2 << 16) #define SLINK_TX_TRIG_16 (3 << 16) #define SLINK_TX_TRIG_MASK (3 << 16) #define SLINK_RX_TRIG_1 (0 << 18) #define SLINK_RX_TRIG_4 (1 << 18) #define SLINK_RX_TRIG_8 (2 << 18) #define SLINK_RX_TRIG_16 (3 << 18) #define SLINK_RX_TRIG_MASK (3 << 18) #define SLINK_PACKED (1 << 20) #define SLINK_PACK_SIZE_4 (0 << 21) #define SLINK_PACK_SIZE_8 (1 << 21) #define SLINK_PACK_SIZE_16 (2 << 21) #define SLINK_PACK_SIZE_32 (3 << 21) #define SLINK_PACK_SIZE_MASK (3 << 21) #define SLINK_IE_TXC (1 << 26) #define SLINK_IE_RXC (1 << 27) #define SLINK_DMA_EN (1 << 31) #define 
SLINK_STATUS2 0x01c #define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) #define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) #define SLINK_TX_FIFO 0x100 #define SLINK_RX_FIFO 0x180 static const unsigned long spi_tegra_req_sels[] = { TEGRA_DMA_REQ_SEL_SL2B1, TEGRA_DMA_REQ_SEL_SL2B2, TEGRA_DMA_REQ_SEL_SL2B3, TEGRA_DMA_REQ_SEL_SL2B4, }; #define BB_LEN 32 struct spi_tegra_data { struct spi_master *master; struct platform_device *pdev; spinlock_t lock; struct clk *clk; void __iomem *base; unsigned long phys; u32 cur_speed; struct list_head queue; struct spi_transfer *cur; unsigned cur_pos; unsigned cur_len; unsigned cur_bytes_per_word; /* The tegra spi controller has a bug which causes the first word * in PIO transactions to be garbage. Since packed DMA transactions * require transfers to be 4 byte aligned we need a bounce buffer * for the generic case. */ struct tegra_dma_req rx_dma_req; struct tegra_dma_channel *rx_dma; u32 *rx_bb; dma_addr_t rx_bb_phys; }; static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, unsigned long reg) { return readl(tspi->base + reg); } static inline void spi_tegra_writel(struct spi_tegra_data *tspi, unsigned long val, unsigned long reg) { writel(val, tspi->base + reg); } static void spi_tegra_go(struct spi_tegra_data *tspi) { unsigned long val; wmb(); val = spi_tegra_readl(tspi, SLINK_DMA_CTL); val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); spi_tegra_writel(tspi, val, SLINK_DMA_CTL); tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); val |= SLINK_DMA_EN; spi_tegra_writel(tspi, val, SLINK_DMA_CTL); } static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, struct spi_transfer *t) { unsigned len = min(t->len - tspi->cur_pos, BB_LEN * tspi->cur_bytes_per_word); u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; int i, j; unsigned long val; val = spi_tegra_readl(tspi, SLINK_COMMAND); val &= ~SLINK_WORD_SIZE(~0); val |= 
SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); spi_tegra_writel(tspi, val, SLINK_COMMAND); for (i = 0; i < len; i += tspi->cur_bytes_per_word) { val = 0; for (j = 0; j < tspi->cur_bytes_per_word; j++) val |= tx_buf[i + j] << j * 8; spi_tegra_writel(tspi, val, SLINK_TX_FIFO); } tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; return len; } static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, struct spi_transfer *t) { unsigned len = tspi->cur_len; u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; int i, j; unsigned long val; for (i = 0; i < len; i += tspi->cur_bytes_per_word) { val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; for (j = 0; j < tspi->cur_bytes_per_word; j++) rx_buf[i + j] = (val >> (j * 8)) & 0xff; } return len; } static void spi_tegra_start_transfer(struct spi_device *spi, struct spi_transfer *t) { struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); u32 speed; u8 bits_per_word; unsigned long val; speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; bits_per_word = t->bits_per_word ? t->bits_per_word : spi->bits_per_word; tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; if (speed != tspi->cur_speed) clk_set_rate(tspi->clk, speed); if (tspi->cur_speed == 0) clk_enable(tspi->clk); tspi->cur_speed = speed; val = spi_tegra_readl(tspi, SLINK_COMMAND2); val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; if (t->rx_buf) val |= SLINK_RXEN; if (t->tx_buf) val |= SLINK_TXEN; val |= SLINK_SS_EN_CS(spi->chip_select); val |= SLINK_SPIE; spi_tegra_writel(tspi, val, SLINK_COMMAND2); val = spi_tegra_readl(tspi, SLINK_COMMAND); val &= ~SLINK_BIT_LENGTH(~0); val |= SLINK_BIT_LENGTH(bits_per_word - 1); /* FIXME: should probably control CS manually so that we can be sure * it does not go low between transfer and to support delay_usecs * correctly. 
*/ val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; if (spi->mode & SPI_CPHA) val |= SLINK_CK_SDA; if (spi->mode & SPI_CPOL) val |= SLINK_IDLE_SCLK_DRIVE_HIGH; else val |= SLINK_IDLE_SCLK_DRIVE_LOW; val |= SLINK_M_S; spi_tegra_writel(tspi, val, SLINK_COMMAND); spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); tspi->cur = t; tspi->cur_pos = 0; tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); spi_tegra_go(tspi); } static void spi_tegra_start_message(struct spi_device *spi, struct spi_message *m) { struct spi_transfer *t; m->actual_length = 0; m->status = 0; t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); spi_tegra_start_transfer(spi, t); } static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) { struct spi_tegra_data *tspi = req->dev; unsigned long flags; struct spi_message *m; struct spi_device *spi; int timeout = 0; unsigned long val; /* the SPI controller may come back with both the BSY and RDY bits * set. In this case we need to wait for the BSY bit to clear so * that we are sure the DMA is finished. 1000 reads was empirically * determined to be long enough. 
*/ while (timeout++ < 1000) { if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) break; } spin_lock_irqsave(&tspi->lock, flags); val = spi_tegra_readl(tspi, SLINK_STATUS); val |= SLINK_RDY; spi_tegra_writel(tspi, val, SLINK_STATUS); m = list_first_entry(&tspi->queue, struct spi_message, queue); if (timeout >= 1000) m->status = -EIO; spi = m->state; tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); m->actual_length += tspi->cur_pos; if (tspi->cur_pos < tspi->cur->len) { tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); spi_tegra_go(tspi); } else if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) { tspi->cur = list_first_entry(&tspi->cur->transfer_list, struct spi_transfer, transfer_list); spi_tegra_start_transfer(spi, tspi->cur); } else { list_del(&m->queue); m->complete(m->context); if (!list_empty(&tspi->queue)) { m = list_first_entry(&tspi->queue, struct spi_message, queue); spi = m->state; spi_tegra_start_message(spi, m); } else { clk_disable(tspi->clk); tspi->cur_speed = 0; } } spin_unlock_irqrestore(&tspi->lock, flags); } static int spi_tegra_setup(struct spi_device *spi) { struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); unsigned long cs_bit; unsigned long val; unsigned long flags; dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", spi->bits_per_word, spi->mode & SPI_CPOL ? "" : "~", spi->mode & SPI_CPHA ? 
"" : "~", spi->max_speed_hz); switch (spi->chip_select) { case 0: cs_bit = SLINK_CS_POLARITY; break; case 1: cs_bit = SLINK_CS_POLARITY1; break; case 2: cs_bit = SLINK_CS_POLARITY2; break; case 4: cs_bit = SLINK_CS_POLARITY3; break; default: return -EINVAL; } spin_lock_irqsave(&tspi->lock, flags); val = spi_tegra_readl(tspi, SLINK_COMMAND); if (spi->mode & SPI_CS_HIGH) val |= cs_bit; else val &= ~cs_bit; spi_tegra_writel(tspi, val, SLINK_COMMAND); spin_unlock_irqrestore(&tspi->lock, flags); return 0; } static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) { struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); struct spi_transfer *t; unsigned long flags; int was_empty; if (list_empty(&m->transfers) || !m->complete) return -EINVAL; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->bits_per_word < 0 || t->bits_per_word > 32) return -EINVAL; if (t->len == 0) return -EINVAL; if (!t->rx_buf && !t->tx_buf) return -EINVAL; } m->state = spi; spin_lock_irqsave(&tspi->lock, flags); was_empty = list_empty(&tspi->queue); list_add_tail(&m->queue, &tspi->queue); if (was_empty) spi_tegra_start_message(spi, m); spin_unlock_irqrestore(&tspi->lock, flags); return 0; } static int __devinit spi_tegra_probe(struct platform_device *pdev) { struct spi_master *master; struct spi_tegra_data *tspi; struct resource *r; int ret; master = spi_alloc_master(&pdev->dev, sizeof *tspi); if (master == NULL) { dev_err(&pdev->dev, "master allocation failed\n"); return -ENOMEM; } /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bus_num = pdev->id; master->setup = spi_tegra_setup; master->transfer = spi_tegra_transfer; master->num_chipselect = 4; dev_set_drvdata(&pdev->dev, master); tspi = spi_master_get_devdata(master); tspi->master = master; tspi->pdev = pdev; spin_lock_init(&tspi->lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { ret = -ENODEV; goto err0; } if 
(!request_mem_region(r->start, resource_size(r), dev_name(&pdev->dev))) { ret = -EBUSY; goto err0; } tspi->phys = r->start; tspi->base = ioremap(r->start, resource_size(r)); if (!tspi->base) { dev_err(&pdev->dev, "can't ioremap iomem\n"); ret = -ENOMEM; goto err1; } tspi->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(tspi->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tspi->clk); goto err2; } INIT_LIST_HEAD(&tspi->queue); tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); if (!tspi->rx_dma) { dev_err(&pdev->dev, "can not allocate rx dma channel\n"); ret = -ENODEV; goto err3; } tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, &tspi->rx_bb_phys, GFP_KERNEL); if (!tspi->rx_bb) { dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); ret = -ENOMEM; goto err4; } tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; tspi->rx_dma_req.to_memory = 1; tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; tspi->rx_dma_req.dest_bus_width = 32; tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; tspi->rx_dma_req.source_bus_width = 32; tspi->rx_dma_req.source_wrap = 4; tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; tspi->rx_dma_req.dev = tspi; master->dev.of_node = pdev->dev.of_node; ret = spi_register_master(master); if (ret < 0) goto err5; return ret; err5: dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, tspi->rx_bb, tspi->rx_bb_phys); err4: tegra_dma_free_channel(tspi->rx_dma); err3: clk_put(tspi->clk); err2: iounmap(tspi->base); err1: release_mem_region(r->start, resource_size(r)); err0: spi_master_put(master); return ret; } static int __devexit spi_tegra_remove(struct platform_device *pdev) { struct spi_master *master; struct spi_tegra_data *tspi; struct resource *r; master = dev_get_drvdata(&pdev->dev); tspi = spi_master_get_devdata(master); spi_unregister_master(master); tegra_dma_free_channel(tspi->rx_dma); dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, tspi->rx_bb, tspi->rx_bb_phys); 
clk_put(tspi->clk); iounmap(tspi->base); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, resource_size(r)); return 0; } MODULE_ALIAS("platform:spi_tegra"); #ifdef CONFIG_OF static struct of_device_id spi_tegra_of_match_table[] __devinitdata = { { .compatible = "nvidia,tegra20-spi", }, {} }; MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table); #else /* CONFIG_OF */ #define spi_tegra_of_match_table NULL #endif /* CONFIG_OF */ static struct platform_driver spi_tegra_driver = { .driver = { .name = "spi_tegra", .owner = THIS_MODULE, .of_match_table = spi_tegra_of_match_table, }, .probe = spi_tegra_probe, .remove = __devexit_p(spi_tegra_remove), }; module_platform_driver(spi_tegra_driver); MODULE_LICENSE("GPL");
gpl-2.0
computersforpeace/UBIFS-backports
drivers/video/xilinxfb.c
5062
13897
/* * Xilinx TFT frame buffer driver * * Author: MontaVista Software, Inc. * source@mvista.com * * 2002-2007 (c) MontaVista Software, Inc. * 2007 (c) Secret Lab Technologies, Ltd. * 2009 (c) Xilinx Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ /* * This driver was based on au1100fb.c by MontaVista rewritten for 2.6 * by Embedded Alley Solutions <source@embeddedalley.com>, which in turn * was based on skeletonfb.c, Skeleton for a frame buffer device by * Geert Uytterhoeven. */ #include <linux/device.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/xilinxfb.h> #include <linux/slab.h> #ifdef CONFIG_PPC_DCR #include <asm/dcr.h> #endif #define DRIVER_NAME "xilinxfb" /* * Xilinx calls it "PLB TFT LCD Controller" though it can also be used for * the VGA port on the Xilinx ML40x board. This is a hardware display * controller for a 640x480 resolution TFT or VGA screen. * * The interface to the framebuffer is nice and simple. There are two * control registers. The first tells the LCD interface where in memory * the frame buffer is (only the 11 most significant bits are used, so * don't start thinking about scrolling). The second allows the LCD to * be turned on or off as well as rotated 180 degrees. * * In case of direct PLB access the second control register will be at * an offset of 4 as compared to the DCR access where the offset is 1 * i.e. REG_CTRL. So this is taken care in the function * xilinx_fb_out_be32 where it left shifts the offset 2 times in case of * direct PLB access. 
*/ #define NUM_REGS 2 #define REG_FB_ADDR 0 #define REG_CTRL 1 #define REG_CTRL_ENABLE 0x0001 #define REG_CTRL_ROTATE 0x0002 /* * The hardware only handles a single mode: 640x480 24 bit true * color. Each pixel gets a word (32 bits) of memory. Within each word, * the 8 most significant bits are ignored, the next 8 bits are the red * level, the next 8 bits are the green level and the 8 least * significant bits are the blue level. Each row of the LCD uses 1024 * words, but only the first 640 pixels are displayed with the other 384 * words being ignored. There are 480 rows. */ #define BYTES_PER_PIXEL 4 #define BITS_PER_PIXEL (BYTES_PER_PIXEL * 8) #define RED_SHIFT 16 #define GREEN_SHIFT 8 #define BLUE_SHIFT 0 #define PALETTE_ENTRIES_NO 16 /* passed to fb_alloc_cmap() */ /* * Default xilinxfb configuration */ static struct xilinxfb_platform_data xilinx_fb_default_pdata = { .xres = 640, .yres = 480, .xvirt = 1024, .yvirt = 480, }; /* * Here are the default fb_fix_screeninfo and fb_var_screeninfo structures */ static struct fb_fix_screeninfo xilinx_fb_fix = { .id = "Xilinx", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE }; static struct fb_var_screeninfo xilinx_fb_var = { .bits_per_pixel = BITS_PER_PIXEL, .red = { RED_SHIFT, 8, 0 }, .green = { GREEN_SHIFT, 8, 0 }, .blue = { BLUE_SHIFT, 8, 0 }, .transp = { 0, 0, 0 }, .activate = FB_ACTIVATE_NOW }; #define PLB_ACCESS_FLAG 0x1 /* 1 = PLB, 0 = DCR */ struct xilinxfb_drvdata { struct fb_info info; /* FB driver info record */ phys_addr_t regs_phys; /* phys. address of the control registers */ void __iomem *regs; /* virt. address of the control registers */ #ifdef CONFIG_PPC_DCR dcr_host_t dcr_host; unsigned int dcr_len; #endif void *fb_virt; /* virt. address of the frame buffer */ dma_addr_t fb_phys; /* phys. address of the frame buffer */ int fb_alloced; /* Flag, was the fb memory alloced? 
*/ u8 flags; /* features of the driver */ u32 reg_ctrl_default; u32 pseudo_palette[PALETTE_ENTRIES_NO]; /* Fake palette of 16 colors */ }; #define to_xilinxfb_drvdata(_info) \ container_of(_info, struct xilinxfb_drvdata, info) /* * The XPS TFT Controller can be accessed through PLB or DCR interface. * To perform the read/write on the registers we need to check on * which bus its connected and call the appropriate write API. */ static void xilinx_fb_out_be32(struct xilinxfb_drvdata *drvdata, u32 offset, u32 val) { if (drvdata->flags & PLB_ACCESS_FLAG) out_be32(drvdata->regs + (offset << 2), val); #ifdef CONFIG_PPC_DCR else dcr_write(drvdata->dcr_host, offset, val); #endif } static int xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi) { u32 *palette = fbi->pseudo_palette; if (regno >= PALETTE_ENTRIES_NO) return -EINVAL; if (fbi->var.grayscale) { /* Convert color to grayscale. * grayscale = 0.30*R + 0.59*G + 0.11*B */ red = green = blue = (red * 77 + green * 151 + blue * 28 + 127) >> 8; } /* fbi->fix.visual is always FB_VISUAL_TRUECOLOR */ /* We only handle 8 bits of each color. 
*/ red >>= 8; green >>= 8; blue >>= 8; palette[regno] = (red << RED_SHIFT) | (green << GREEN_SHIFT) | (blue << BLUE_SHIFT); return 0; } static int xilinx_fb_blank(int blank_mode, struct fb_info *fbi) { struct xilinxfb_drvdata *drvdata = to_xilinxfb_drvdata(fbi); switch (blank_mode) { case FB_BLANK_UNBLANK: /* turn on panel */ xilinx_fb_out_be32(drvdata, REG_CTRL, drvdata->reg_ctrl_default); break; case FB_BLANK_NORMAL: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: /* turn off panel */ xilinx_fb_out_be32(drvdata, REG_CTRL, 0); default: break; } return 0; /* success */ } static struct fb_ops xilinxfb_ops = { .owner = THIS_MODULE, .fb_setcolreg = xilinx_fb_setcolreg, .fb_blank = xilinx_fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* --------------------------------------------------------------------- * Bus independent setup/teardown */ static int xilinxfb_assign(struct device *dev, struct xilinxfb_drvdata *drvdata, unsigned long physaddr, struct xilinxfb_platform_data *pdata) { int rc; int fbsize = pdata->xvirt * pdata->yvirt * BYTES_PER_PIXEL; if (drvdata->flags & PLB_ACCESS_FLAG) { /* * Map the control registers in if the controller * is on direct PLB interface. 
*/ if (!request_mem_region(physaddr, 8, DRIVER_NAME)) { dev_err(dev, "Couldn't lock memory region at 0x%08lX\n", physaddr); rc = -ENODEV; goto err_region; } drvdata->regs_phys = physaddr; drvdata->regs = ioremap(physaddr, 8); if (!drvdata->regs) { dev_err(dev, "Couldn't lock memory region at 0x%08lX\n", physaddr); rc = -ENODEV; goto err_map; } } /* Allocate the framebuffer memory */ if (pdata->fb_phys) { drvdata->fb_phys = pdata->fb_phys; drvdata->fb_virt = ioremap(pdata->fb_phys, fbsize); } else { drvdata->fb_alloced = 1; drvdata->fb_virt = dma_alloc_coherent(dev, PAGE_ALIGN(fbsize), &drvdata->fb_phys, GFP_KERNEL); } if (!drvdata->fb_virt) { dev_err(dev, "Could not allocate frame buffer memory\n"); rc = -ENOMEM; if (drvdata->flags & PLB_ACCESS_FLAG) goto err_fbmem; else goto err_region; } /* Clear (turn to black) the framebuffer */ memset_io((void __iomem *)drvdata->fb_virt, 0, fbsize); /* Tell the hardware where the frame buffer is */ xilinx_fb_out_be32(drvdata, REG_FB_ADDR, drvdata->fb_phys); /* Turn on the display */ drvdata->reg_ctrl_default = REG_CTRL_ENABLE; if (pdata->rotate_screen) drvdata->reg_ctrl_default |= REG_CTRL_ROTATE; xilinx_fb_out_be32(drvdata, REG_CTRL, drvdata->reg_ctrl_default); /* Fill struct fb_info */ drvdata->info.device = dev; drvdata->info.screen_base = (void __iomem *)drvdata->fb_virt; drvdata->info.fbops = &xilinxfb_ops; drvdata->info.fix = xilinx_fb_fix; drvdata->info.fix.smem_start = drvdata->fb_phys; drvdata->info.fix.smem_len = fbsize; drvdata->info.fix.line_length = pdata->xvirt * BYTES_PER_PIXEL; drvdata->info.pseudo_palette = drvdata->pseudo_palette; drvdata->info.flags = FBINFO_DEFAULT; drvdata->info.var = xilinx_fb_var; drvdata->info.var.height = pdata->screen_height_mm; drvdata->info.var.width = pdata->screen_width_mm; drvdata->info.var.xres = pdata->xres; drvdata->info.var.yres = pdata->yres; drvdata->info.var.xres_virtual = pdata->xvirt; drvdata->info.var.yres_virtual = pdata->yvirt; /* Allocate a colour map */ rc = 
fb_alloc_cmap(&drvdata->info.cmap, PALETTE_ENTRIES_NO, 0); if (rc) { dev_err(dev, "Fail to allocate colormap (%d entries)\n", PALETTE_ENTRIES_NO); goto err_cmap; } /* Register new frame buffer */ rc = register_framebuffer(&drvdata->info); if (rc) { dev_err(dev, "Could not register frame buffer\n"); goto err_regfb; } if (drvdata->flags & PLB_ACCESS_FLAG) { /* Put a banner in the log (for DEBUG) */ dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr, drvdata->regs); } /* Put a banner in the log (for DEBUG) */ dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n", (unsigned long long)drvdata->fb_phys, drvdata->fb_virt, fbsize); return 0; /* success */ err_regfb: fb_dealloc_cmap(&drvdata->info.cmap); err_cmap: if (drvdata->fb_alloced) dma_free_coherent(dev, PAGE_ALIGN(fbsize), drvdata->fb_virt, drvdata->fb_phys); else iounmap(drvdata->fb_virt); /* Turn off the display */ xilinx_fb_out_be32(drvdata, REG_CTRL, 0); err_fbmem: if (drvdata->flags & PLB_ACCESS_FLAG) iounmap(drvdata->regs); err_map: if (drvdata->flags & PLB_ACCESS_FLAG) release_mem_region(physaddr, 8); err_region: kfree(drvdata); dev_set_drvdata(dev, NULL); return rc; } static int xilinxfb_release(struct device *dev) { struct xilinxfb_drvdata *drvdata = dev_get_drvdata(dev); #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO) xilinx_fb_blank(VESA_POWERDOWN, &drvdata->info); #endif unregister_framebuffer(&drvdata->info); fb_dealloc_cmap(&drvdata->info.cmap); if (drvdata->fb_alloced) dma_free_coherent(dev, PAGE_ALIGN(drvdata->info.fix.smem_len), drvdata->fb_virt, drvdata->fb_phys); else iounmap(drvdata->fb_virt); /* Turn off the display */ xilinx_fb_out_be32(drvdata, REG_CTRL, 0); /* Release the resources, as allocated based on interface */ if (drvdata->flags & PLB_ACCESS_FLAG) { iounmap(drvdata->regs); release_mem_region(drvdata->regs_phys, 8); } #ifdef CONFIG_PPC_DCR else dcr_unmap(drvdata->dcr_host, drvdata->dcr_len); #endif kfree(drvdata); dev_set_drvdata(dev, NULL); return 0; } /* 
--------------------------------------------------------------------- * OF bus binding */ static int __devinit xilinxfb_of_probe(struct platform_device *op) { const u32 *prop; u32 *p; u32 tft_access; struct xilinxfb_platform_data pdata; struct resource res; int size, rc; struct xilinxfb_drvdata *drvdata; /* Copy with the default pdata (not a ptr reference!) */ pdata = xilinx_fb_default_pdata; /* Allocate the driver data region */ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); if (!drvdata) { dev_err(&op->dev, "Couldn't allocate device private record\n"); return -ENOMEM; } /* * To check whether the core is connected directly to DCR or PLB * interface and initialize the tft_access accordingly. */ p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL); tft_access = p ? *p : 0; /* * Fill the resource structure if its direct PLB interface * otherwise fill the dcr_host structure. */ if (tft_access) { drvdata->flags |= PLB_ACCESS_FLAG; rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); goto err; } } #ifdef CONFIG_PPC_DCR else { int start; res.start = 0; start = dcr_resource_start(op->dev.of_node, 0); drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0); drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len); if (!DCR_MAP_OK(drvdata->dcr_host)) { dev_err(&op->dev, "invalid DCR address\n"); goto err; } } #endif prop = of_get_property(op->dev.of_node, "phys-size", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.screen_width_mm = prop[0]; pdata.screen_height_mm = prop[1]; } prop = of_get_property(op->dev.of_node, "resolution", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.xres = prop[0]; pdata.yres = prop[1]; } prop = of_get_property(op->dev.of_node, "virtual-resolution", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.xvirt = prop[0]; pdata.yvirt = prop[1]; } if (of_find_property(op->dev.of_node, "rotate-display", NULL)) pdata.rotate_screen = 1; 
dev_set_drvdata(&op->dev, drvdata); return xilinxfb_assign(&op->dev, drvdata, res.start, &pdata); err: kfree(drvdata); return -ENODEV; } static int __devexit xilinxfb_of_remove(struct platform_device *op) { return xilinxfb_release(&op->dev); } /* Match table for of_platform binding */ static struct of_device_id xilinxfb_of_match[] __devinitdata = { { .compatible = "xlnx,xps-tft-1.00.a", }, { .compatible = "xlnx,xps-tft-2.00.a", }, { .compatible = "xlnx,xps-tft-2.01.a", }, { .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", }, { .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", }, {}, }; MODULE_DEVICE_TABLE(of, xilinxfb_of_match); static struct platform_driver xilinxfb_of_driver = { .probe = xilinxfb_of_probe, .remove = __devexit_p(xilinxfb_of_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = xilinxfb_of_match, }, }; module_platform_driver(xilinxfb_of_driver); MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); MODULE_DESCRIPTION("Xilinx TFT frame buffer driver"); MODULE_LICENSE("GPL");
gpl-2.0
ShinySide/HispAsian_S5
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
5062
11681
/*
 * SH7366 Setup
 *
 * Copyright (C) 2008 Renesas Solutions
 *
 * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>

/* SCIF0 serial port */
static struct plat_sci_port scif0_platform_data = {
    .mapbase        = 0xffe00000,
    .port_reg       = 0xa405013e,
    .flags          = UPF_BOOT_AUTOCONF,
    .scscr          = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
    .scbrr_algo_id  = SCBRR_ALGO_2,
    .type           = PORT_SCIF,
    .irqs           = { 80, 80, 80, 80 },
};

static struct platform_device scif0_device = {
    .name = "sh-sci",
    .id   = 0,
    .dev  = {
        .platform_data = &scif0_platform_data,
    },
};

/* On-chip I2C controller */
static struct resource iic_resources[] = {
    [0] = {
        .name  = "IIC",
        .start = 0x04470000,
        .end   = 0x04470017,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 96,
        .end   = 99,
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device iic_device = {
    .name          = "i2c-sh_mobile",
    .id            = 0, /* "i2c0" clock */
    .num_resources = ARRAY_SIZE(iic_resources),
    .resource      = iic_resources,
};

/* r8a66597 USB host controller (on-chip) */
static struct r8a66597_platdata r8a66597_data = {
    .on_chip = 1,
};

static struct resource usb_host_resources[] = {
    [0] = {
        .start = 0xa4d80000,
        .end   = 0xa4d800ff,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 65,
        .end   = 65,
        .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
    },
};

static struct platform_device usb_host_device = {
    .name = "r8a66597_hcd",
    .id   = -1,
    .dev  = {
        .dma_mask          = NULL,
        .coherent_dma_mask = 0xffffffff,
        .platform_data     = &r8a66597_data,
    },
    .num_resources = ARRAY_SIZE(usb_host_resources),
    .resource      = usb_host_resources,
};

/* VPU (video processing unit), exported to userspace via UIO */
static struct uio_info vpu_platform_data = {
    .name    = "VPU5",
    .version = "0",
    .irq     = 60,
};

static struct resource vpu_resources[] = {
    [0] = {
        .name  = "VPU",
        .start = 0xfe900000,
        .end   = 0xfe902807,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        /* place holder for contiguous memory */
    },
};

static struct platform_device vpu_device = {
    .name = "uio_pdrv_genirq",
    .id   = 0,
    .dev  = {
        .platform_data = &vpu_platform_data,
    },
    .resource      = vpu_resources,
    .num_resources = ARRAY_SIZE(vpu_resources),
};

/* VEU (video engine unit) instance 0, exported via UIO */
static struct uio_info veu0_platform_data = {
    .name    = "VEU",
    .version = "0",
    .irq     = 54,
};

static struct resource veu0_resources[] = {
    [0] = {
        .name  = "VEU(1)",
        .start = 0xfe920000,
        .end   = 0xfe9200b7,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        /* place holder for contiguous memory */
    },
};

static struct platform_device veu0_device = {
    .name = "uio_pdrv_genirq",
    .id   = 1,
    .dev  = {
        .platform_data = &veu0_platform_data,
    },
    .resource      = veu0_resources,
    .num_resources = ARRAY_SIZE(veu0_resources),
};

/* VEU instance 1, exported via UIO */
static struct uio_info veu1_platform_data = {
    .name    = "VEU",
    .version = "0",
    .irq     = 27,
};

static struct resource veu1_resources[] = {
    [0] = {
        .name  = "VEU(2)",
        .start = 0xfe924000,
        .end   = 0xfe9240b7,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        /* place holder for contiguous memory */
    },
};

static struct platform_device veu1_device = {
    .name = "uio_pdrv_genirq",
    .id   = 2,
    .dev  = {
        .platform_data = &veu1_platform_data,
    },
    .resource      = veu1_resources,
    .num_resources = ARRAY_SIZE(veu1_resources),
};

/* CMT compare-match timer: clockevent + clocksource */
static struct sh_timer_config cmt_platform_data = {
    .channel_offset     = 0x60,
    .timer_bit          = 5,
    .clockevent_rating  = 125,
    .clocksource_rating = 200,
};

static struct resource cmt_resources[] = {
    [0] = {
        .start = 0x044a0060,
        .end   = 0x044a006b,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 104,
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device cmt_device = {
    .name = "sh_cmt",
    .id   = 0,
    .dev  = {
        .platform_data = &cmt_platform_data,
    },
    .resource      = cmt_resources,
    .num_resources = ARRAY_SIZE(cmt_resources),
};

/* TMU channel 0: clockevent */
static struct sh_timer_config tmu0_platform_data = {
    .channel_offset    = 0x04,
    .timer_bit         = 0,
    .clockevent_rating = 200,
};

static struct resource tmu0_resources[] = {
    [0] = {
        .start = 0xffd80008,
        .end   = 0xffd80013,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 16,
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device tmu0_device = {
    .name = "sh_tmu",
    .id   = 0,
    .dev  = {
        .platform_data = &tmu0_platform_data,
    },
    .resource      = tmu0_resources,
    .num_resources = ARRAY_SIZE(tmu0_resources),
};

/* TMU channel 1: clocksource */
static struct sh_timer_config tmu1_platform_data = {
    .channel_offset     = 0x10,
    .timer_bit          = 1,
    .clocksource_rating = 200,
};

static struct resource tmu1_resources[] = {
    [0] = {
        .start = 0xffd80014,
        .end   = 0xffd8001f,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 17,
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device tmu1_device = {
    .name = "sh_tmu",
    .id   = 1,
    .dev  = {
        .platform_data = &tmu1_platform_data,
    },
    .resource      = tmu1_resources,
    .num_resources = ARRAY_SIZE(tmu1_resources),
};

/* TMU channel 2: no rating, available as a plain timer */
static struct sh_timer_config tmu2_platform_data = {
    .channel_offset = 0x1c,
    .timer_bit      = 2,
};

static struct resource tmu2_resources[] = {
    [0] = {
        .start = 0xffd80020,
        .end   = 0xffd8002b,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = 18,
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device tmu2_device = {
    .name = "sh_tmu",
    .id   = 2,
    .dev  = {
        .platform_data = &tmu2_platform_data,
    },
    .resource      = tmu2_resources,
    .num_resources = ARRAY_SIZE(tmu2_resources),
};

/* Full device list registered at arch_initcall time. */
static struct platform_device *sh7366_devices[] __initdata = {
    &scif0_device,
    &cmt_device,
    &tmu0_device,
    &tmu1_device,
    &tmu2_device,
    &iic_device,
    &usb_host_device,
    &vpu_device,
    &veu0_device,
    &veu1_device,
};

static int __init sh7366_devices_setup(void)
{
    /* Reserve 2 MiB of contiguous memory for each media IP block. */
    platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
    platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
    platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);

    return platform_add_devices(sh7366_devices,
                    ARRAY_SIZE(sh7366_devices));
}
arch_initcall(sh7366_devices_setup);

/* Subset needed before the regular initcalls run (console, timers). */
static struct platform_device *sh7366_early_devices[] __initdata = {
    &scif0_device,
    &cmt_device,
    &tmu0_device,
    &tmu1_device,
    &tmu2_device,
};

void __init plat_early_device_setup(void)
{
    early_platform_add_devices(sh7366_early_devices,
                   ARRAY_SIZE(sh7366_early_devices));
}

enum {
    UNUSED = 0,
    ENABLED,
    DISABLED,

    /* interrupt sources */
    IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
    ICB,
    DMAC0, DMAC1, DMAC2, DMAC3,
    VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
    MFI, VPU, USB,
    MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
    DMAC4, DMAC5, DMAC_DADERR,
    SCIF, SCIFA1, SCIFA2,
    DENC, MSIOF,
    FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
    I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
    SDHI, CMT, TSIF, SIU,
    TMU0, TMU1, TMU2,
    VEU2, LCDC,

    /* interrupt groups */
    DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
};

static struct intc_vect vectors[] __initdata = {
    INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
    INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
    INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
    INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
    INTC_VECT(ICB, 0x700),
    INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
    INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
    INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
    INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
    INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
    INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
    INTC_VECT(MMC_MMC3I, 0xb40),
    INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
    INTC_VECT(DMAC_DADERR, 0xbc0),
    INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
    INTC_VECT(SCIFA2, 0xc40),
    INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
    INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
    INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
    INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
    INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
    /* SDHI raises four vectors that all funnel into one source. */
    INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
    INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
    INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
    INTC_VECT(SIU, 0xf80),
    INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
    INTC_VECT(TMU2, 0x440),
    INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
};

static struct intc_group groups[] __initdata = {
    INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
    INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
    INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
    INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
    INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
           FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
    INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
};

static struct intc_mask_reg mask_registers[] __initdata = {
    { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
      { } },
    { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
      { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
    { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
      { 0, 0, 0, VPU, 0, 0, 0, MFI } },
    { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
      { 0, 0, 0, ICB } },
    { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
      { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
    { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
      { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
    { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
      { 0, 0, 0, 0, 0, 0, 0, MSIOF } },
    { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
      { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
        FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
    { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
      { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
    { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
      { 0, 0, 0, CMT, 0, USB, } },
    { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
      { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
    { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
      { 0, 0, 0, 0, 0, 0, 0, TSIF } },
    { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
      { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

static struct intc_prio_reg prio_registers[] __initdata = {
    { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
    { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
    { 0xa4080008, 0, 16, 4, /* IPRC */ { } },
    { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
    { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
    { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
    { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
    { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
    { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
    { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
    { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
    { 0xa408002c, 0, 16, 4, /* IPRL */ { } },
    { 0xa4140010, 0, 32, 4, /* INTPRI00 */
      { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

static struct intc_sense_reg sense_registers[] __initdata = {
    { 0xa414001c, 16, 2, /* ICR1 */
      { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

static struct intc_mask_reg ack_registers[] __initdata = {
    { 0xa4140024, 0, 8, /* INTREQ00 */
      { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};

static struct intc_desc intc_desc __initdata = {
    .name = "sh7366",
    .force_enable = ENABLED,
    .force_disable = DISABLED,
    .hw = INTC_HW_DESC(vectors, groups, mask_registers,
               prio_registers, sense_registers, ack_registers),
};

void __init plat_irq_setup(void)
{
    register_intc_controller(&intc_desc);
}

void __init plat_mem_setup(void)
{
    /* TODO: Register Node 1 */
}
gpl-2.0
tilaksidduram/Stock_kernel
drivers/ata/pata_marvell.c
7878
4541
/* * Marvell PATA driver. * * For the moment we drive the PATA port in legacy mode. That * isn't making full use of the device functionality but it is * easy to get working. * * (c) 2006 Red Hat */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_marvell" #define DRV_VERSION "0.1.6" /** * marvell_pata_active - check if PATA is active * @pdev: PCI device * * Returns 1 if the PATA port may be active. We know how to check this * for the 6145 but not the other devices */ static int marvell_pata_active(struct pci_dev *pdev) { int i; u32 devices; void __iomem *barp; /* We don't yet know how to do this for other devices */ if (pdev->device != 0x6145) return 1; barp = pci_iomap(pdev, 5, 0x10); if (barp == NULL) return -ENOMEM; printk("BAR5:"); for(i = 0; i <= 0x0F; i++) printk("%02X:%02X ", i, ioread8(barp + i)); printk("\n"); devices = ioread32(barp + 0x0C); pci_iounmap(pdev, barp); if (devices & 0x10) return 1; return 0; } /** * marvell_pre_reset - probe begin * @link: link * @deadline: deadline jiffies for the operation * * Perform the PATA port setup we need. */ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (pdev->device == 0x6145 && ap->port_no == 0 && !marvell_pata_active(pdev)) /* PATA enable ? 
*/ return -ENOENT; return ata_sff_prereset(link, deadline); } static int marvell_cable_detect(struct ata_port *ap) { /* Cable type */ switch(ap->port_no) { case 0: if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) return ATA_CBL_PATA40; return ATA_CBL_PATA80; case 1: /* Legacy SATA port */ return ATA_CBL_SATA; } BUG(); return 0; /* Our BUG macro needs the right markup */ } /* No PIO or DMA methods needed for this device */ static struct scsi_host_template marvell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations marvell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = marvell_cable_detect, .prereset = marvell_pre_reset, }; /** * marvell_init_one - Register Marvell ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in marvell_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. */ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &marvell_ops, }; static const struct ata_port_info info_sata = { /* Slave possible as its magically mapped not real */ .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &marvell_ops, }; const struct ata_port_info *ppi[] = { &info, &info_sata }; if (pdev->device == 0x6101) ppi[1] = &ata_dummy_port_info; #if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE) if (!marvell_pata_active(pdev)) { printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); return -ENODEV; } #endif return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); } static const struct pci_device_id marvell_pci_tbl[] = { { PCI_DEVICE(0x11AB, 0x6101), }, { PCI_DEVICE(0x11AB, 0x6121), }, { PCI_DEVICE(0x11AB, 0x6123), }, { 
PCI_DEVICE(0x11AB, 0x6145), }, { PCI_DEVICE(0x1B4B, 0x91A0), }, { PCI_DEVICE(0x1B4B, 0x91A4), }, { } /* terminate list */ }; static struct pci_driver marvell_pci_driver = { .name = DRV_NAME, .id_table = marvell_pci_tbl, .probe = marvell_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init marvell_init(void) { return pci_register_driver(&marvell_pci_driver); } static void __exit marvell_exit(void) { pci_unregister_driver(&marvell_pci_driver); } module_init(marvell_init); module_exit(marvell_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, marvell_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
sparkma/android_kernel_mtk6577_common
fs/ecryptfs/messaging.c
8134
17771
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2004-2008 International Business Machines Corp. * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/nsproxy.h> #include "ecryptfs_kernel.h" static LIST_HEAD(ecryptfs_msg_ctx_free_list); static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); static struct mutex ecryptfs_msg_ctx_lists_mux; static struct hlist_head *ecryptfs_daemon_hash; struct mutex ecryptfs_daemon_hash_mux; static int ecryptfs_hash_bits; #define ecryptfs_uid_hash(uid) \ hash_long((unsigned long)uid, ecryptfs_hash_bits) static u32 ecryptfs_msg_counter; static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; /** * ecryptfs_acquire_free_msg_ctx * @msg_ctx: The context that was acquired from the free list * * Acquires a context element from the free list and locks the mutex * on the context. Sets the msg_ctx task to current. Returns zero on * success; non-zero on error or upon failure to acquire a free * context element. Must be called with ecryptfs_msg_ctx_lists_mux * held. 
*/ static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) { struct list_head *p; int rc; if (list_empty(&ecryptfs_msg_ctx_free_list)) { printk(KERN_WARNING "%s: The eCryptfs free " "context list is empty. It may be helpful to " "specify the ecryptfs_message_buf_len " "parameter to be greater than the current " "value of [%d]\n", __func__, ecryptfs_message_buf_len); rc = -ENOMEM; goto out; } list_for_each(p, &ecryptfs_msg_ctx_free_list) { *msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node); if (mutex_trylock(&(*msg_ctx)->mux)) { (*msg_ctx)->task = current; rc = 0; goto out; } } rc = -ENOMEM; out: return rc; } /** * ecryptfs_msg_ctx_free_to_alloc * @msg_ctx: The context to move from the free list to the alloc list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_PENDING; msg_ctx->counter = ++ecryptfs_msg_counter; } /** * ecryptfs_msg_ctx_alloc_to_free * @msg_ctx: The context to move from the alloc list to the free list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); if (msg_ctx->msg) kfree(msg_ctx->msg); msg_ctx->msg = NULL; msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; } /** * ecryptfs_find_daemon_by_euid * @euid: The effective user id which maps to the desired daemon id * @user_ns: The namespace in which @euid applies * @daemon: If return value is zero, points to the desired daemon pointer * * Must be called with ecryptfs_daemon_hash_mux held. * * Search the hash list for the given user id. * * Returns zero if the user id exists in the list; non-zero otherwise. 
*/ int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, struct user_namespace *user_ns) { struct hlist_node *elem; int rc; hlist_for_each_entry(*daemon, elem, &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)], euid_chain) { if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) { rc = 0; goto out; } } rc = -EINVAL; out: return rc; } /** * ecryptfs_spawn_daemon - Create and initialize a new daemon struct * @daemon: Pointer to set to newly allocated daemon struct * @euid: Effective user id for the daemon * @user_ns: The namespace in which @euid applies * @pid: Process id for the daemon * * Must be called ceremoniously while in possession of * ecryptfs_sacred_daemon_hash_mux * * Returns zero on success; non-zero otherwise */ int ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, struct user_namespace *user_ns, struct pid *pid) { int rc = 0; (*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL); if (!(*daemon)) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); goto out; } (*daemon)->euid = euid; (*daemon)->user_ns = get_user_ns(user_ns); (*daemon)->pid = get_pid(pid); (*daemon)->task = current; mutex_init(&(*daemon)->mux); INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); init_waitqueue_head(&(*daemon)->wait); (*daemon)->num_queued_msg_ctx = 0; hlist_add_head(&(*daemon)->euid_chain, &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]); out: return rc; } /** * ecryptfs_exorcise_daemon - Destroy the daemon struct * * Must be called ceremoniously while in possession of * ecryptfs_daemon_hash_mux and the daemon's own mux. 
*/ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) { struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp; int rc = 0; mutex_lock(&daemon->mux); if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { rc = -EBUSY; printk(KERN_WARNING "%s: Attempt to destroy daemon with pid " "[0x%p], but it is in the midst of a read or a poll\n", __func__, daemon->pid); mutex_unlock(&daemon->mux); goto out; } list_for_each_entry_safe(msg_ctx, msg_ctx_tmp, &daemon->msg_ctx_out_queue, daemon_out_list) { list_del(&msg_ctx->daemon_out_list); daemon->num_queued_msg_ctx--; printk(KERN_WARNING "%s: Warning: dropping message that is in " "the out queue of a dying daemon\n", __func__); ecryptfs_msg_ctx_alloc_to_free(msg_ctx); } hlist_del(&daemon->euid_chain); if (daemon->task) wake_up_process(daemon->task); if (daemon->pid) put_pid(daemon->pid); if (daemon->user_ns) put_user_ns(daemon->user_ns); mutex_unlock(&daemon->mux); kzfree(daemon); out: return rc; } /** * ecryptfs_process_quit * @euid: The user ID owner of the message * @user_ns: The namespace in which @euid applies * @pid: The process ID for the userspace program that sent the * message * * Deletes the corresponding daemon for the given euid and pid, if * it is the registered that is requesting the deletion. Returns zero * after deleting the desired daemon; non-zero otherwise. 
*/ int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, struct pid *pid) { struct ecryptfs_daemon *daemon; int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns); if (rc || !daemon) { rc = -EINVAL; printk(KERN_ERR "Received request from user [%d] to " "unregister unrecognized daemon [0x%p]\n", euid, pid); goto out_unlock; } rc = ecryptfs_exorcise_daemon(daemon); out_unlock: mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_process_reponse * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory * @pid: The process ID of the userspace application that sent the * message * @seq: The sequence number of the message; must match the sequence * number for the existing message context waiting for this * response * * Processes a response message after sending an operation request to * userspace. Some other process is awaiting this response. Before * sending out its first communications, the other process allocated a * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The * response message contains this index so that we can copy over the * response message into the msg_ctx that the process holds a * reference to. The other process is going to wake up, check to see * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then * proceed to read off and process the response message. Returns zero * upon delivery to desired context element; non-zero upon delivery * failure or error. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, struct user_namespace *user_ns, struct pid *pid, u32 seq) { struct ecryptfs_daemon *uninitialized_var(daemon); struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; struct nsproxy *nsproxy; struct user_namespace *tsk_user_ns; uid_t ctx_euid; int rc; if (msg->index >= ecryptfs_message_buf_len) { rc = -EINVAL; printk(KERN_ERR "%s: Attempt to reference " "context buffer at index [%d]; maximum " "allowable is [%d]\n", __func__, msg->index, (ecryptfs_message_buf_len - 1)); goto out; } msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; mutex_lock(&msg_ctx->mux); mutex_lock(&ecryptfs_daemon_hash_mux); rcu_read_lock(); nsproxy = task_nsproxy(msg_ctx->task); if (nsproxy == NULL) { rc = -EBADMSG; printk(KERN_ERR "%s: Receiving process is a zombie. Dropping " "message.\n", __func__); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); goto wake_up; } tsk_user_ns = __task_cred(msg_ctx->task)->user->user_ns; ctx_euid = task_euid(msg_ctx->task); rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { rc = -EBADMSG; printk(KERN_WARNING "%s: User [%d] received a " "message response from process [0x%p] but does " "not have a registered daemon\n", __func__, ctx_euid, pid); goto wake_up; } if (ctx_euid != euid) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user " "[%d]; expected message from user [%d]\n", __func__, euid, ctx_euid); goto unlock; } if (tsk_user_ns != user_ns) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user_ns " "[0x%p]; expected message from user_ns [0x%p]\n", __func__, user_ns, tsk_user_ns); goto unlock; } if (daemon->pid != pid) { rc = -EBADMSG; printk(KERN_ERR "%s: User [%d] sent a message response " "from an unrecognized process [0x%p]\n", __func__, ctx_euid, pid); goto unlock; } if (msg_ctx->state != 
ECRYPTFS_MSG_CTX_STATE_PENDING) { rc = -EINVAL; printk(KERN_WARNING "%s: Desired context element is not " "pending a response\n", __func__); goto unlock; } else if (msg_ctx->counter != seq) { rc = -EINVAL; printk(KERN_WARNING "%s: Invalid message sequence; " "expected [%d]; received [%d]\n", __func__, msg_ctx->counter, seq); goto unlock; } msg_size = (sizeof(*msg) + msg->data_len); msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL); if (!msg_ctx->msg) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, msg_size); goto unlock; } memcpy(msg_ctx->msg, msg, msg_size); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE; rc = 0; wake_up: wake_up_process(msg_ctx->task); unlock: mutex_unlock(&msg_ctx->mux); out: return rc; } /** * ecryptfs_send_message_locked * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Must be called with ecryptfs_daemon_hash_mux held. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; uid_t euid = current_euid(); int rc; rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); if (rc || !daemon) { rc = -ENOTCONN; printk(KERN_ERR "%s: User [%d] does not have a daemon " "registered\n", __func__, euid); goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_acquire_free_msg_ctx(msg_ctx); if (rc) { mutex_unlock(&ecryptfs_msg_ctx_lists_mux); printk(KERN_WARNING "%s: Could not claim a free " "context element\n", __func__); goto out; } ecryptfs_msg_ctx_free_to_alloc(*msg_ctx); mutex_unlock(&(*msg_ctx)->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, 0, daemon); if (rc) printk(KERN_ERR "%s: Error attempting to send message to " "userspace daemon; rc = [%d]\n", __func__, rc); out: return rc; } /** * 
ecryptfs_send_message * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Grabs ecryptfs_daemon_hash_mux. * * Returns zero on success; non-zero otherwise */ int ecryptfs_send_message(char *data, int data_len, struct ecryptfs_msg_ctx **msg_ctx) { int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_send_message_locked(data, data_len, ECRYPTFS_MSG_REQUEST, msg_ctx); mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_wait_for_response * @msg_ctx: The context that was assigned when sending a message * @msg: The incoming message from userspace; not set if rc != 0 * * Sleeps until awaken by ecryptfs_receive_message or until the amount * of time exceeds ecryptfs_message_wait_timeout. If zero is * returned, msg will point to a valid message from userspace; a * non-zero value is returned upon failure to receive a message or an * error occurs. Callee must free @msg on success. */ int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, struct ecryptfs_message **msg) { signed long timeout = ecryptfs_message_wait_timeout * HZ; int rc = 0; sleep: timeout = schedule_timeout_interruptible(timeout); mutex_lock(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&msg_ctx->mux); if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_DONE) { if (timeout) { mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); goto sleep; } rc = -ENOMSG; } else { *msg = msg_ctx->msg; msg_ctx->msg = NULL; } ecryptfs_msg_ctx_alloc_to_free(msg_ctx); mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); return rc; } int __init ecryptfs_init_messaging(void) { int i; int rc = 0; if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) { ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS; printk(KERN_WARNING "%s: Specified number of users is " "too large, defaulting to [%d] users\n", __func__, ecryptfs_number_of_users); } mutex_init(&ecryptfs_daemon_hash_mux); mutex_lock(&ecryptfs_daemon_hash_mux); 
ecryptfs_hash_bits = 1; while (ecryptfs_number_of_users >> ecryptfs_hash_bits) ecryptfs_hash_bits++; ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) * (1 << ecryptfs_hash_bits)), GFP_KERNEL); if (!ecryptfs_daemon_hash) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); mutex_unlock(&ecryptfs_daemon_hash_mux); goto out; } for (i = 0; i < (1 << ecryptfs_hash_bits); i++) INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); mutex_unlock(&ecryptfs_daemon_hash_mux); ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) * ecryptfs_message_buf_len), GFP_KERNEL); if (!ecryptfs_msg_ctx_arr) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); goto out; } mutex_init(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&ecryptfs_msg_ctx_lists_mux); ecryptfs_msg_counter = 0; for (i = 0; i < ecryptfs_message_buf_len; i++) { INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node); INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list); mutex_init(&ecryptfs_msg_ctx_arr[i].mux); mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); ecryptfs_msg_ctx_arr[i].index = i; ecryptfs_msg_ctx_arr[i].state = ECRYPTFS_MSG_CTX_STATE_FREE; ecryptfs_msg_ctx_arr[i].counter = 0; ecryptfs_msg_ctx_arr[i].task = NULL; ecryptfs_msg_ctx_arr[i].msg = NULL; list_add_tail(&ecryptfs_msg_ctx_arr[i].node, &ecryptfs_msg_ctx_free_list); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_init_ecryptfs_miscdev(); if (rc) ecryptfs_release_messaging(); out: return rc; } void ecryptfs_release_messaging(void) { if (ecryptfs_msg_ctx_arr) { int i; mutex_lock(&ecryptfs_msg_ctx_lists_mux); for (i = 0; i < ecryptfs_message_buf_len; i++) { mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); if (ecryptfs_msg_ctx_arr[i].msg) kfree(ecryptfs_msg_ctx_arr[i].msg); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } kfree(ecryptfs_msg_ctx_arr); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); } if (ecryptfs_daemon_hash) { struct hlist_node *elem; struct ecryptfs_daemon 
*daemon; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; hlist_for_each_entry(daemon, elem, &ecryptfs_daemon_hash[i], euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " "attempting to destroy daemon; " "rc = [%d]. Dazed and confused, " "but trying to continue.\n", __func__, rc); } } kfree(ecryptfs_daemon_hash); mutex_unlock(&ecryptfs_daemon_hash_mux); } ecryptfs_destroy_ecryptfs_miscdev(); return; }
gpl-2.0
Abhinav1997/kernel_sony_msm8930
arch/sh/kernel/cpu/sh4/sq.c
8390
9697
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

/* One node per live Store Queue mapping, kept on a singly-linked list. */
struct sq_mapping {
	const char *name;	/* owner tag, shown in boot log and sysfs */
	unsigned long sq_addr;	/* SQ (P4SEG) virtual address */
	unsigned long addr;	/* backing physical address */
	unsigned int size;	/* mapping length in bytes */
	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

/*
 * Read back from the SQ area, then store to both queues, to ensure any
 * outstanding SQ transfers have completed.
 *
 * Note: no semicolon after "while (0)" -- the call site supplies it, so
 * the expansion is exactly one statement (safe inside unbraced if/else;
 * the original trailing ';' defeated the do/while(0) idiom).
 */
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues, one 32-byte SQ line per iteration */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);

/* Append @map to the tail of the global mapping list. */
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

/* Unlink @map from the global mapping list. */
static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

/*
 * Install the page table entries (MMU case) or QACR settings (noMMU case)
 * that make map->addr visible through map->sq_addr.
 */
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 *
 * Returns the SQ virtual address on success, or a negative errno (encoded
 * in the unsigned long -- check with IS_ERR_VALUE()) on failure.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out_release;	/* original leaked the bitmap region here */

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out_release:
	/* give the reserved SQ pages back on mapping failure */
	bitmap_release_region(sq_bitmap, page, get_order(map->size));
out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @map that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

/* Dump the active mappings, one "start-end [phys]: name" line each. */
static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

/* "base len" creates a mapping; "base 0" (or just "base") removes one. */
static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		/*
		 * sq_remap() returns the SQ virtual address (P4SEG range,
		 * negative when viewed as an int) or an errno encoded in
		 * the unsigned long.  The original stored it in an int and
		 * tested "< 0", which rejected every successful mapping.
		 */
		unsigned long addr = sq_remap(base, len, "Userspace",
					      PAGE_SHARED);
		if (IS_ERR_VALUE(addr))
			return (long)addr;
	} else
		sq_unmap(base);

	return count;
}

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

/*
 * NOTE(review): no .release handler, so the kzalloc'd kobject from
 * sq_dev_add() is never freed on the final kobject_put() -- confirm
 * whether a release that kfree()s the kobject should be added.
 */
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static int sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
	return 0;
}

static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/*
	 * "size" counts longs, not bytes; the original kzalloc(size)
	 * under-allocated the bitmap by a factor of sizeof(long), so
	 * bitmap_find_free_region() could scribble past the allocation.
	 */
	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
gpl-2.0
Maxr1998/hellsCore-mako
arch/x86/boot/video-mode.c
13254
3897
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * arch/i386/boot/video-mode.c
 *
 * Set the video mode.  This is separated out into a different
 * file in order to be shared with the ACPI wakeup code.
 */

#include "boot.h"
#include "video.h"
#include "vesa.h"

/*
 * Common variables
 */
int adapter;		/* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
u16 video_segment;
int force_x, force_y;	/* Don't query the BIOS for cols/rows */
int do_restore;		/* Screen contents changed during mode flip */
int graphic_mode;	/* Graphic mode with linear frame buffer */

/* Probe the video drivers and have them generate their mode lists. */
void probe_cards(int unsafe)
{
	struct card_info *card;
	static u8 probed[2];

	/* Each pass (safe/unsafe) only ever needs to run once. */
	if (probed[unsafe])
		return;
	probed[unsafe] = 1;

	for (card = video_cards; card < video_cards_end; card++) {
		if (card->unsafe != unsafe)
			continue;
		card->nmodes = card->probe ? card->probe() : 0;
	}
}

/* Test if a mode is defined */
int mode_defined(u16 mode)
{
	struct card_info *card;
	struct mode_info *mi;
	int i;

	for (card = video_cards; card < video_cards_end; card++) {
		mi = card->modes;
		for (i = 0; i < card->nmodes; i++, mi++)
			if (mi->mode == mode)
				return 1;
	}

	return 0;
}

/* Set mode (without recalc) */
static int raw_set_mode(u16 mode, u16 *real_mode)
{
	int nmode, i;
	struct card_info *card;
	struct mode_info *mi;

	/* Drop the recalc bit if set */
	mode &= ~VIDEO_RECALC;

	/* Scan for mode based on fixed ID, position, or resolution */
	nmode = 0;
	for (card = video_cards; card < video_cards_end; card++) {
		mi = card->modes;
		for (i = 0; i < card->nmodes; i++, mi++) {
			int visible = mi->x || mi->y;

			/* Match by menu position, mode ID, or YxX packing */
			if ((mode == nmode && visible) ||
			    mode == mi->mode ||
			    mode == (mi->y << 8)+mi->x) {
				*real_mode = mi->mode;
				return card->set_mode(mi);
			}

			if (visible)
				nmode++;
		}
	}

	/* Nothing found?  Is it an "exceptional" (unprobed) mode? */
	for (card = video_cards; card < video_cards_end; card++) {
		if (mode >= card->xmode_first &&
		    mode < card->xmode_first+card->xmode_n) {
			struct mode_info mix;
			*real_mode = mix.mode = mode;
			mix.x = mix.y = 0;
			return card->set_mode(&mix);
		}
	}

	/* Otherwise, failure... */
	return -1;
}

/*
 * Recalculate the vertical video cutoff (hack!)
 */
static void vga_recalc_vertical(void)
{
	unsigned int font_size, rows;
	u16 crtc;
	u8 pt, ov;

	set_fs(0);
	font_size = rdfs8(0x485);	/* BIOS: font size (pixels) */
	rows = force_y ? force_y : rdfs8(0x484)+1;	/* Text rows */

	rows *= font_size;	/* Visible scan lines */
	rows--;			/* ... minus one */

	crtc = vga_crtc();

	pt = in_idx(crtc, 0x11);
	pt &= ~0x80;		/* Unlock CR0-7 */
	out_idx(pt, crtc, 0x11);

	out_idx((u8)rows, crtc, 0x12);	/* Lower height register */

	ov = in_idx(crtc, 0x07);	/* Overflow register */
	ov &= 0xbd;
	ov |= (rows >> (8-1)) & 0x02;	/* Bit 8 of the cutoff */
	ov |= (rows >> (9-6)) & 0x40;	/* Bit 9 of the cutoff */
	out_idx(ov, crtc, 0x07);
}

/* Set mode (with recalc if specified) */
int set_mode(u16 mode)
{
	int rv;
	u16 real_mode;

	/* Very special mode numbers... */
	if (mode == VIDEO_CURRENT_MODE)
		return 0;	/* Nothing to do... */
	else if (mode == NORMAL_VGA)
		mode = VIDEO_80x25;
	else if (mode == EXTENDED_VGA)
		mode = VIDEO_8POINT;

	rv = raw_set_mode(mode, &real_mode);
	if (rv)
		return rv;

	if (mode & VIDEO_RECALC)
		vga_recalc_vertical();

	/* Save the canonical mode number for the kernel, not
	   an alias, size specification or menu position */
#ifndef _WAKEUP
	boot_params.hdr.vid_mode = real_mode;
#endif

	return 0;
}
gpl-2.0
Split-Screen/android_kernel_motorola_msm8916
drivers/pci/pcie/aer/ecrc.c
14022
3302
/* * Enables/disables PCIe ECRC checking. * * (C) Copyright 2009 Hewlett-Packard Development Company, L.P. * Andrew Patterson <andrew.patterson@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/pci_regs.h> #include <linux/errno.h> #include "../../pci.h" #define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */ #define ECRC_POLICY_OFF 1 /* ECRC off for performance */ #define ECRC_POLICY_ON 2 /* ECRC on for data integrity */ static int ecrc_policy = ECRC_POLICY_DEFAULT; static const char *ecrc_policy_str[] = { [ECRC_POLICY_DEFAULT] = "bios", [ECRC_POLICY_OFF] = "off", [ECRC_POLICY_ON] = "on" }; /** * enable_ercr_checking - enable PCIe ECRC checking for a device * @dev: the PCI device * * Returns 0 on success, or negative on failure. 
*/ static int enable_ecrc_checking(struct pci_dev *dev) { int pos; u32 reg32; if (!pci_is_pcie(dev)) return -ENODEV; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -ENODEV; pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); if (reg32 & PCI_ERR_CAP_ECRC_GENC) reg32 |= PCI_ERR_CAP_ECRC_GENE; if (reg32 & PCI_ERR_CAP_ECRC_CHKC) reg32 |= PCI_ERR_CAP_ECRC_CHKE; pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); return 0; } /** * disable_ercr_checking - disables PCIe ECRC checking for a device * @dev: the PCI device * * Returns 0 on success, or negative on failure. */ static int disable_ecrc_checking(struct pci_dev *dev) { int pos; u32 reg32; if (!pci_is_pcie(dev)) return -ENODEV; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -ENODEV; pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE); pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); return 0; } /** * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy * @dev: the PCI device */ void pcie_set_ecrc_checking(struct pci_dev *dev) { switch (ecrc_policy) { case ECRC_POLICY_DEFAULT: return; case ECRC_POLICY_OFF: disable_ecrc_checking(dev); break; case ECRC_POLICY_ON: enable_ecrc_checking(dev); break; default: return; } } /** * pcie_ecrc_get_policy - parse kernel command-line ecrc option */ void pcie_ecrc_get_policy(char *str) { int i; for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++) if (!strncmp(str, ecrc_policy_str[i], strlen(ecrc_policy_str[i]))) break; if (i >= ARRAY_SIZE(ecrc_policy_str)) return; ecrc_policy = i; }
gpl-2.0
bhadram/linux
sound/soc/ux500/ux500_msp_dai.c
199
22706
/* * Copyright (C) ST-Ericsson SA 2012 * * Author: Ola Lilja <ola.o.lilja@stericsson.com>, * Roger Nilsson <roger.xr.nilsson@stericsson.com> * for ST-Ericsson. * * License terms: * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/platform_data/asoc-ux500-msp.h> #include <sound/soc.h> #include <sound/soc-dai.h> #include <sound/dmaengine_pcm.h> #include "ux500_msp_i2s.h" #include "ux500_msp_dai.h" #include "ux500_pcm.h" static int setup_pcm_multichan(struct snd_soc_dai *dai, struct ux500_msp_config *msp_config) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); struct msp_multichannel_config *multi = &msp_config->multichannel_config; if (drvdata->slots > 1) { msp_config->multichannel_configured = 1; multi->tx_multichannel_enable = true; multi->rx_multichannel_enable = true; multi->rx_comparison_enable_mode = MSP_COMPARISON_DISABLED; multi->tx_channel_0_enable = drvdata->tx_mask; multi->tx_channel_1_enable = 0; multi->tx_channel_2_enable = 0; multi->tx_channel_3_enable = 0; multi->rx_channel_0_enable = drvdata->rx_mask; multi->rx_channel_1_enable = 0; multi->rx_channel_2_enable = 0; multi->rx_channel_3_enable = 0; dev_dbg(dai->dev, "%s: Multichannel enabled. 
Slots: %d, TX: %u, RX: %u\n", __func__, drvdata->slots, multi->tx_channel_0_enable, multi->rx_channel_0_enable); } return 0; } static int setup_frameper(struct snd_soc_dai *dai, unsigned int rate, struct msp_protdesc *prot_desc) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); switch (drvdata->slots) { case 1: switch (rate) { case 8000: prot_desc->frame_period = FRAME_PER_SINGLE_SLOT_8_KHZ; break; case 16000: prot_desc->frame_period = FRAME_PER_SINGLE_SLOT_16_KHZ; break; case 44100: prot_desc->frame_period = FRAME_PER_SINGLE_SLOT_44_1_KHZ; break; case 48000: prot_desc->frame_period = FRAME_PER_SINGLE_SLOT_48_KHZ; break; default: dev_err(dai->dev, "%s: Error: Unsupported sample-rate (freq = %d)!\n", __func__, rate); return -EINVAL; } break; case 2: prot_desc->frame_period = FRAME_PER_2_SLOTS; break; case 8: prot_desc->frame_period = FRAME_PER_8_SLOTS; break; case 16: prot_desc->frame_period = FRAME_PER_16_SLOTS; break; default: dev_err(dai->dev, "%s: Error: Unsupported slot-count (slots = %d)!\n", __func__, drvdata->slots); return -EINVAL; } prot_desc->clocks_per_frame = prot_desc->frame_period+1; dev_dbg(dai->dev, "%s: Clocks per frame: %u\n", __func__, prot_desc->clocks_per_frame); return 0; } static int setup_pcm_framing(struct snd_soc_dai *dai, unsigned int rate, struct msp_protdesc *prot_desc) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); u32 frame_length = MSP_FRAME_LEN_1; prot_desc->frame_width = 0; switch (drvdata->slots) { case 1: frame_length = MSP_FRAME_LEN_1; break; case 2: frame_length = MSP_FRAME_LEN_2; break; case 8: frame_length = MSP_FRAME_LEN_8; break; case 16: frame_length = MSP_FRAME_LEN_16; break; default: dev_err(dai->dev, "%s: Error: Unsupported slot-count (slots = %d)!\n", __func__, drvdata->slots); return -EINVAL; } prot_desc->tx_frame_len_1 = frame_length; prot_desc->rx_frame_len_1 = frame_length; prot_desc->tx_frame_len_2 = frame_length; prot_desc->rx_frame_len_2 = frame_length; 
prot_desc->tx_elem_len_1 = MSP_ELEM_LEN_16; prot_desc->rx_elem_len_1 = MSP_ELEM_LEN_16; prot_desc->tx_elem_len_2 = MSP_ELEM_LEN_16; prot_desc->rx_elem_len_2 = MSP_ELEM_LEN_16; return setup_frameper(dai, rate, prot_desc); } static int setup_clocking(struct snd_soc_dai *dai, unsigned int fmt, struct ux500_msp_config *msp_config) { switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_NB_IF: msp_config->tx_fsync_pol ^= 1 << TFSPOL_SHIFT; msp_config->rx_fsync_pol ^= 1 << RFSPOL_SHIFT; break; default: dev_err(dai->dev, "%s: Error: Unsopported inversion (fmt = 0x%x)!\n", __func__, fmt); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: dev_dbg(dai->dev, "%s: Codec is master.\n", __func__); msp_config->iodelay = 0x20; msp_config->rx_fsync_sel = 0; msp_config->tx_fsync_sel = 1 << TFSSEL_SHIFT; msp_config->tx_clk_sel = 0; msp_config->rx_clk_sel = 0; msp_config->srg_clk_sel = 0x2 << SCKSEL_SHIFT; break; case SND_SOC_DAIFMT_CBS_CFS: dev_dbg(dai->dev, "%s: Codec is slave.\n", __func__); msp_config->tx_clk_sel = TX_CLK_SEL_SRG; msp_config->tx_fsync_sel = TX_SYNC_SRG_PROG; msp_config->rx_clk_sel = RX_CLK_SEL_SRG; msp_config->rx_fsync_sel = RX_SYNC_SRG; msp_config->srg_clk_sel = 1 << SCKSEL_SHIFT; break; default: dev_err(dai->dev, "%s: Error: Unsopported master (fmt = 0x%x)!\n", __func__, fmt); return -EINVAL; } return 0; } static int setup_pcm_protdesc(struct snd_soc_dai *dai, unsigned int fmt, struct msp_protdesc *prot_desc) { prot_desc->rx_phase_mode = MSP_SINGLE_PHASE; prot_desc->tx_phase_mode = MSP_SINGLE_PHASE; prot_desc->rx_phase2_start_mode = MSP_PHASE2_START_MODE_IMEDIATE; prot_desc->tx_phase2_start_mode = MSP_PHASE2_START_MODE_IMEDIATE; prot_desc->rx_byte_order = MSP_BTF_MS_BIT_FIRST; prot_desc->tx_byte_order = MSP_BTF_MS_BIT_FIRST; prot_desc->tx_fsync_pol = MSP_FSYNC_POL(MSP_FSYNC_POL_ACT_HI); prot_desc->rx_fsync_pol = MSP_FSYNC_POL_ACT_HI << RFSPOL_SHIFT; if ((fmt & 
SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_DSP_A) { dev_dbg(dai->dev, "%s: DSP_A.\n", __func__); prot_desc->rx_clk_pol = MSP_RISING_EDGE; prot_desc->tx_clk_pol = MSP_FALLING_EDGE; prot_desc->rx_data_delay = MSP_DELAY_1; prot_desc->tx_data_delay = MSP_DELAY_1; } else { dev_dbg(dai->dev, "%s: DSP_B.\n", __func__); prot_desc->rx_clk_pol = MSP_FALLING_EDGE; prot_desc->tx_clk_pol = MSP_RISING_EDGE; prot_desc->rx_data_delay = MSP_DELAY_0; prot_desc->tx_data_delay = MSP_DELAY_0; } prot_desc->rx_half_word_swap = MSP_SWAP_NONE; prot_desc->tx_half_word_swap = MSP_SWAP_NONE; prot_desc->compression_mode = MSP_COMPRESS_MODE_LINEAR; prot_desc->expansion_mode = MSP_EXPAND_MODE_LINEAR; prot_desc->frame_sync_ignore = MSP_FSYNC_IGNORE; return 0; } static int setup_i2s_protdesc(struct msp_protdesc *prot_desc) { prot_desc->rx_phase_mode = MSP_DUAL_PHASE; prot_desc->tx_phase_mode = MSP_DUAL_PHASE; prot_desc->rx_phase2_start_mode = MSP_PHASE2_START_MODE_FSYNC; prot_desc->tx_phase2_start_mode = MSP_PHASE2_START_MODE_FSYNC; prot_desc->rx_byte_order = MSP_BTF_MS_BIT_FIRST; prot_desc->tx_byte_order = MSP_BTF_MS_BIT_FIRST; prot_desc->tx_fsync_pol = MSP_FSYNC_POL(MSP_FSYNC_POL_ACT_LO); prot_desc->rx_fsync_pol = MSP_FSYNC_POL_ACT_LO << RFSPOL_SHIFT; prot_desc->rx_frame_len_1 = MSP_FRAME_LEN_1; prot_desc->rx_frame_len_2 = MSP_FRAME_LEN_1; prot_desc->tx_frame_len_1 = MSP_FRAME_LEN_1; prot_desc->tx_frame_len_2 = MSP_FRAME_LEN_1; prot_desc->rx_elem_len_1 = MSP_ELEM_LEN_16; prot_desc->rx_elem_len_2 = MSP_ELEM_LEN_16; prot_desc->tx_elem_len_1 = MSP_ELEM_LEN_16; prot_desc->tx_elem_len_2 = MSP_ELEM_LEN_16; prot_desc->rx_clk_pol = MSP_RISING_EDGE; prot_desc->tx_clk_pol = MSP_FALLING_EDGE; prot_desc->rx_data_delay = MSP_DELAY_0; prot_desc->tx_data_delay = MSP_DELAY_0; prot_desc->tx_half_word_swap = MSP_SWAP_NONE; prot_desc->rx_half_word_swap = MSP_SWAP_NONE; prot_desc->compression_mode = MSP_COMPRESS_MODE_LINEAR; prot_desc->expansion_mode = MSP_EXPAND_MODE_LINEAR; prot_desc->frame_sync_ignore = 
MSP_FSYNC_IGNORE; return 0; } static int setup_msp_config(struct snd_pcm_substream *substream, struct snd_soc_dai *dai, struct ux500_msp_config *msp_config) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); struct msp_protdesc *prot_desc = &msp_config->protdesc; struct snd_pcm_runtime *runtime = substream->runtime; unsigned int fmt = drvdata->fmt; int ret; memset(msp_config, 0, sizeof(*msp_config)); msp_config->f_inputclk = drvdata->master_clk; msp_config->tx_fifo_config = TX_FIFO_ENABLE; msp_config->rx_fifo_config = RX_FIFO_ENABLE; msp_config->def_elem_len = 1; msp_config->direction = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? MSP_DIR_TX : MSP_DIR_RX; msp_config->data_size = MSP_DATA_BITS_32; msp_config->frame_freq = runtime->rate; dev_dbg(dai->dev, "%s: f_inputclk = %u, frame_freq = %u.\n", __func__, msp_config->f_inputclk, msp_config->frame_freq); /* To avoid division by zero */ prot_desc->clocks_per_frame = 1; dev_dbg(dai->dev, "%s: rate: %u, channels: %d.\n", __func__, runtime->rate, runtime->channels); switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) { case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS: dev_dbg(dai->dev, "%s: SND_SOC_DAIFMT_I2S.\n", __func__); msp_config->default_protdesc = 1; msp_config->protocol = MSP_I2S_PROTOCOL; break; case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM: dev_dbg(dai->dev, "%s: SND_SOC_DAIFMT_I2S.\n", __func__); msp_config->data_size = MSP_DATA_BITS_16; msp_config->protocol = MSP_I2S_PROTOCOL; ret = setup_i2s_protdesc(prot_desc); if (ret < 0) return ret; break; case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM: dev_dbg(dai->dev, "%s: PCM format.\n", __func__); msp_config->data_size = MSP_DATA_BITS_16; msp_config->protocol = MSP_PCM_PROTOCOL; ret = setup_pcm_protdesc(dai, fmt, prot_desc); if (ret < 0) return ret; ret = 
setup_pcm_multichan(dai, msp_config); if (ret < 0) return ret; ret = setup_pcm_framing(dai, runtime->rate, prot_desc); if (ret < 0) return ret; break; default: dev_err(dai->dev, "%s: Error: Unsopported format (%d)!\n", __func__, fmt); return -EINVAL; } return setup_clocking(dai, fmt, msp_config); } static int ux500_msp_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { int ret = 0; struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); dev_dbg(dai->dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id, snd_pcm_stream_str(substream)); /* Enable regulator */ ret = regulator_enable(drvdata->reg_vape); if (ret != 0) { dev_err(drvdata->msp->dev, "%s: Failed to enable regulator!\n", __func__); return ret; } /* Prepare and enable clocks */ dev_dbg(dai->dev, "%s: Enabling MSP-clocks.\n", __func__); ret = clk_prepare_enable(drvdata->pclk); if (ret) { dev_err(drvdata->msp->dev, "%s: Failed to prepare/enable pclk!\n", __func__); goto err_pclk; } ret = clk_prepare_enable(drvdata->clk); if (ret) { dev_err(drvdata->msp->dev, "%s: Failed to prepare/enable clk!\n", __func__); goto err_clk; } return ret; err_clk: clk_disable_unprepare(drvdata->pclk); err_pclk: regulator_disable(drvdata->reg_vape); return ret; } static void ux500_msp_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { int ret; struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); dev_dbg(dai->dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id, snd_pcm_stream_str(substream)); if (drvdata->vape_opp_constraint == 1) { prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s", 50); drvdata->vape_opp_constraint = 0; } if (ux500_msp_i2s_close(drvdata->msp, is_playback ? 
MSP_DIR_TX : MSP_DIR_RX)) { dev_err(dai->dev, "%s: Error: MSP %d (%s): Unable to close i2s.\n", __func__, dai->id, snd_pcm_stream_str(substream)); } /* Disable and unprepare clocks */ clk_disable_unprepare(drvdata->clk); clk_disable_unprepare(drvdata->pclk); /* Disable regulator */ ret = regulator_disable(drvdata->reg_vape); if (ret < 0) dev_err(dai->dev, "%s: ERROR: Failed to disable regulator (%d)!\n", __func__, ret); } static int ux500_msp_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { int ret = 0; struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); struct snd_pcm_runtime *runtime = substream->runtime; struct ux500_msp_config msp_config; dev_dbg(dai->dev, "%s: MSP %d (%s): Enter (rate = %d).\n", __func__, dai->id, snd_pcm_stream_str(substream), runtime->rate); setup_msp_config(substream, dai, &msp_config); ret = ux500_msp_i2s_open(drvdata->msp, &msp_config); if (ret < 0) { dev_err(dai->dev, "%s: Error: msp_setup failed (ret = %d)!\n", __func__, ret); return ret; } /* Set OPP-level */ if ((drvdata->fmt & SND_SOC_DAIFMT_MASTER_MASK) && (drvdata->msp->f_bitclk > 19200000)) { /* If the bit-clock is higher than 19.2MHz, Vape should be * run in 100% OPP. 
Only when bit-clock is used (MSP master) */ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500-msp-i2s", 100); drvdata->vape_opp_constraint = 1; } else { prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500-msp-i2s", 50); drvdata->vape_opp_constraint = 0; } return ret; } static int ux500_msp_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { unsigned int mask, slots_active; struct snd_pcm_runtime *runtime = substream->runtime; struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); dev_dbg(dai->dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id, snd_pcm_stream_str(substream)); switch (drvdata->fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 1, 2); break; case SND_SOC_DAIFMT_DSP_B: case SND_SOC_DAIFMT_DSP_A: mask = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? drvdata->tx_mask : drvdata->rx_mask; slots_active = hweight32(mask); dev_dbg(dai->dev, "TDM-slots active: %d", slots_active); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, slots_active, slots_active); break; default: dev_err(dai->dev, "%s: Error: Unsupported protocol (fmt = 0x%x)!\n", __func__, drvdata->fmt); return -EINVAL; } return 0; } static int ux500_msp_dai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); dev_dbg(dai->dev, "%s: MSP %d: Enter.\n", __func__, dai->id); switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) { case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM: break; default: dev_err(dai->dev, "%s: Error: Unsupported protocol/master (fmt = 0x%x)!\n", __func__, 
drvdata->fmt); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: case SND_SOC_DAIFMT_NB_IF: case SND_SOC_DAIFMT_IB_IF: break; default: dev_err(dai->dev, "%s: Error: Unsupported inversion (fmt = 0x%x)!\n", __func__, drvdata->fmt); return -EINVAL; } drvdata->fmt = fmt; return 0; } static int ux500_msp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); unsigned int cap; switch (slots) { case 1: cap = 0x01; break; case 2: cap = 0x03; break; case 8: cap = 0xFF; break; case 16: cap = 0xFFFF; break; default: dev_err(dai->dev, "%s: Error: Unsupported slot-count (%d)!\n", __func__, slots); return -EINVAL; } drvdata->slots = slots; if (!(slot_width == 16)) { dev_err(dai->dev, "%s: Error: Unsupported slot-width (%d)!\n", __func__, slot_width); return -EINVAL; } drvdata->slot_width = slot_width; drvdata->tx_mask = tx_mask & cap; drvdata->rx_mask = rx_mask & cap; return 0; } static int ux500_msp_dai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); dev_dbg(dai->dev, "%s: MSP %d: Enter. 
clk-id: %d, freq: %u.\n", __func__, dai->id, clk_id, freq); switch (clk_id) { case UX500_MSP_MASTER_CLOCK: drvdata->master_clk = freq; break; default: dev_err(dai->dev, "%s: MSP %d: Invalid clk-id (%d)!\n", __func__, dai->id, clk_id); return -EINVAL; } return 0; } static int ux500_msp_dai_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { int ret = 0; struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); dev_dbg(dai->dev, "%s: MSP %d (%s): Enter (msp->id = %d, cmd = %d).\n", __func__, dai->id, snd_pcm_stream_str(substream), (int)drvdata->msp->id, cmd); ret = ux500_msp_i2s_trigger(drvdata->msp, cmd, substream->stream); return ret; } static int ux500_msp_dai_of_probe(struct snd_soc_dai *dai) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); struct snd_dmaengine_dai_dma_data *playback_dma_data; struct snd_dmaengine_dai_dma_data *capture_dma_data; playback_dma_data = devm_kzalloc(dai->dev, sizeof(*playback_dma_data), GFP_KERNEL); if (!playback_dma_data) return -ENOMEM; capture_dma_data = devm_kzalloc(dai->dev, sizeof(*capture_dma_data), GFP_KERNEL); if (!capture_dma_data) return -ENOMEM; playback_dma_data->addr = drvdata->msp->playback_dma_data.tx_rx_addr; capture_dma_data->addr = drvdata->msp->capture_dma_data.tx_rx_addr; playback_dma_data->maxburst = 4; capture_dma_data->maxburst = 4; snd_soc_dai_init_dma_data(dai, playback_dma_data, capture_dma_data); return 0; } static int ux500_msp_dai_probe(struct snd_soc_dai *dai) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev); struct msp_i2s_platform_data *pdata = dai->dev->platform_data; int ret; if (!pdata) { ret = ux500_msp_dai_of_probe(dai); return ret; } drvdata->msp->playback_dma_data.data_size = drvdata->slot_width; drvdata->msp->capture_dma_data.data_size = drvdata->slot_width; snd_soc_dai_init_dma_data(dai, &drvdata->msp->playback_dma_data, &drvdata->msp->capture_dma_data); return 0; } static struct snd_soc_dai_ops ux500_msp_dai_ops[] = 
{ { .set_sysclk = ux500_msp_dai_set_dai_sysclk, .set_fmt = ux500_msp_dai_set_dai_fmt, .set_tdm_slot = ux500_msp_dai_set_tdm_slot, .startup = ux500_msp_dai_startup, .shutdown = ux500_msp_dai_shutdown, .prepare = ux500_msp_dai_prepare, .trigger = ux500_msp_dai_trigger, .hw_params = ux500_msp_dai_hw_params, } }; static struct snd_soc_dai_driver ux500_msp_dai_drv = { .probe = ux500_msp_dai_probe, .suspend = NULL, .resume = NULL, .playback.channels_min = UX500_MSP_MIN_CHANNELS, .playback.channels_max = UX500_MSP_MAX_CHANNELS, .playback.rates = UX500_I2S_RATES, .playback.formats = UX500_I2S_FORMATS, .capture.channels_min = UX500_MSP_MIN_CHANNELS, .capture.channels_max = UX500_MSP_MAX_CHANNELS, .capture.rates = UX500_I2S_RATES, .capture.formats = UX500_I2S_FORMATS, .ops = ux500_msp_dai_ops, }; static const struct snd_soc_component_driver ux500_msp_component = { .name = "ux500-msp", }; static int ux500_msp_drv_probe(struct platform_device *pdev) { struct ux500_msp_i2s_drvdata *drvdata; struct msp_i2s_platform_data *pdata = pdev->dev.platform_data; struct device_node *np = pdev->dev.of_node; int ret = 0; if (!pdata && !np) { dev_err(&pdev->dev, "No platform data or Device Tree found\n"); return -ENODEV; } drvdata = devm_kzalloc(&pdev->dev, sizeof(struct ux500_msp_i2s_drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->fmt = 0; drvdata->slots = 1; drvdata->tx_mask = 0x01; drvdata->rx_mask = 0x01; drvdata->slot_width = 16; drvdata->master_clk = MSP_INPUT_FREQ_APB; drvdata->reg_vape = devm_regulator_get(&pdev->dev, "v-ape"); if (IS_ERR(drvdata->reg_vape)) { ret = (int)PTR_ERR(drvdata->reg_vape); dev_err(&pdev->dev, "%s: ERROR: Failed to get Vape supply (%d)!\n", __func__, ret); return ret; } prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50); drvdata->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(drvdata->pclk)) { ret = (int)PTR_ERR(drvdata->pclk); dev_err(&pdev->dev, "%s: ERROR: devm_clk_get of pclk failed (%d)!\n", __func__, ret); return 
ret; } drvdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(drvdata->clk)) { ret = (int)PTR_ERR(drvdata->clk); dev_err(&pdev->dev, "%s: ERROR: devm_clk_get failed (%d)!\n", __func__, ret); return ret; } ret = ux500_msp_i2s_init_msp(pdev, &drvdata->msp, pdev->dev.platform_data); if (!drvdata->msp) { dev_err(&pdev->dev, "%s: ERROR: Failed to init MSP-struct (%d)!", __func__, ret); return ret; } dev_set_drvdata(&pdev->dev, drvdata); ret = snd_soc_register_component(&pdev->dev, &ux500_msp_component, &ux500_msp_dai_drv, 1); if (ret < 0) { dev_err(&pdev->dev, "Error: %s: Failed to register MSP%d!\n", __func__, drvdata->msp->id); return ret; } ret = ux500_pcm_register_platform(pdev); if (ret < 0) { dev_err(&pdev->dev, "Error: %s: Failed to register PCM platform device!\n", __func__); goto err_reg_plat; } return 0; err_reg_plat: snd_soc_unregister_component(&pdev->dev); return ret; } static int ux500_msp_drv_remove(struct platform_device *pdev) { struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(&pdev->dev); ux500_pcm_unregister_platform(pdev); snd_soc_unregister_component(&pdev->dev); prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s"); ux500_msp_i2s_cleanup_msp(pdev, drvdata->msp); return 0; } static const struct of_device_id ux500_msp_i2s_match[] = { { .compatible = "stericsson,ux500-msp-i2s", }, {}, }; static struct platform_driver msp_i2s_driver = { .driver = { .name = "ux500-msp-i2s", .of_match_table = ux500_msp_i2s_match, }, .probe = ux500_msp_drv_probe, .remove = ux500_msp_drv_remove, }; module_platform_driver(msp_i2s_driver); MODULE_LICENSE("GPL v2");
gpl-2.0
Mazout360/kernel-maz
drivers/bluetooth/btmrvl_debugfs.c
967
5933
/** * Marvell Bluetooth driver: debugfs related functions * * Copyright (C) 2009, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. **/ #include <linux/debugfs.h> #include <linux/slab.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" struct btmrvl_debugfs_data { struct dentry *config_dir; struct dentry *status_dir; }; static ssize_t btmrvl_hscfgcmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = kstrtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscfgcmd = result; if (priv->btmrvl_dev.hscfgcmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscfgcmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations 
btmrvl_hscfgcmd_fops = { .read = btmrvl_hscfgcmd_read, .write = btmrvl_hscfgcmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = kstrtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.pscmd = result; if (priv->btmrvl_dev.pscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_pscmd_fops = { .read = btmrvl_pscmd_read, .write = btmrvl_pscmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = kstrtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscmd = result; if (priv->btmrvl_dev.hscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct 
file_operations btmrvl_hscmd_fops = { .read = btmrvl_hscmd_read, .write = btmrvl_hscmd_write, .open = simple_open, .llseek = default_llseek, }; void btmrvl_debugfs_init(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg; if (!hdev->debugfs) return; dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); priv->debugfs_data = dbg; if (!dbg) { BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); return; } dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); debugfs_create_u8("psmode", 0644, dbg->config_dir, &priv->btmrvl_dev.psmode); debugfs_create_file("pscmd", 0644, dbg->config_dir, priv, &btmrvl_pscmd_fops); debugfs_create_x16("gpiogap", 0644, dbg->config_dir, &priv->btmrvl_dev.gpio_gap); debugfs_create_u8("hsmode", 0644, dbg->config_dir, &priv->btmrvl_dev.hsmode); debugfs_create_file("hscmd", 0644, dbg->config_dir, priv, &btmrvl_hscmd_fops); debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, priv, &btmrvl_hscfgcmd_fops); dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); debugfs_create_u8("curpsmode", 0444, dbg->status_dir, &priv->adapter->psmode); debugfs_create_u8("psstate", 0444, dbg->status_dir, &priv->adapter->ps_state); debugfs_create_u8("hsstate", 0444, dbg->status_dir, &priv->adapter->hs_state); debugfs_create_u8("txdnldready", 0444, dbg->status_dir, &priv->btmrvl_dev.tx_dnld_rdy); } void btmrvl_debugfs_remove(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg = priv->debugfs_data; if (!dbg) return; debugfs_remove_recursive(dbg->config_dir); debugfs_remove_recursive(dbg->status_dir); kfree(dbg); }
gpl-2.0
TangxingZhou/linux
arch/alpha/kernel/ptrace.c
1479
9154
/* ptrace.c */ /* By Ross Biro 1/23/92 */ /* edited by Linus Torvalds */ /* mangled further by Bob Manson (manson@santafe.edu) */ /* more mutilation by David Mosberger (davidm@azstarnet.com) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/tracehook.h> #include <linux/audit.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/fpu.h> #include "proto.h" #define DEBUG DBG_MEM #undef DEBUG #ifdef DEBUG enum { DBG_MEM = (1<<0), DBG_BPT = (1<<1), DBG_MEM_ALL = (1<<2) }; #define DBG(fac,args) {if ((fac) & DEBUG) printk args;} #else #define DBG(fac,args) #endif #define BREAKINST 0x00000080 /* call_pal bpt */ /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Processes always block with the following stack-layout: * * +================================+ <---- task + 2*PAGE_SIZE * | PALcode saved frame (ps, pc, | ^ * | gp, a0, a1, a2) | | * +================================+ | struct pt_regs * | | | * | frame generated by SAVE_ALL | | * | | v * +================================+ * | | ^ * | frame saved by do_switch_stack | | struct switch_stack * | | v * +================================+ */ /* * The following table maps a register index into the stack offset at * which the register is saved. Register indices are 0-31 for integer * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and * zero have no stack-slot and need to be treated specially (see * get_reg/put_reg below). 
*/ enum { REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64 }; #define PT_REG(reg) \ (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg)) #define SW_REG(reg) \ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \ + offsetof(struct switch_stack, reg)) static int regoff[] = { PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3), PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7), PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11), SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15), PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19), PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23), PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27), PT_REG( r28), PT_REG( gp), -1, -1, SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]), SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]), SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]), SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]), SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]), SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]), SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]), SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]), PT_REG( pc) }; static unsigned long zero; /* * Get address of register REGNO in task TASK. */ static unsigned long * get_reg_addr(struct task_struct * task, unsigned long regno) { unsigned long *addr; if (regno == 30) { addr = &task_thread_info(task)->pcb.usp; } else if (regno == 65) { addr = &task_thread_info(task)->pcb.unique; } else if (regno == 31 || regno > 65) { zero = 0; addr = &zero; } else { addr = task_stack_page(task) + regoff[regno]; } return addr; } /* * Get contents of register REGNO in task TASK. */ static unsigned long get_reg(struct task_struct * task, unsigned long regno) { /* Special hack for fpcr -- combine hardware and software bits. 
*/ if (regno == 63) { unsigned long fpcr = *get_reg_addr(task, regno); unsigned long swcr = task_thread_info(task)->ieee_state & IEEE_SW_MASK; swcr = swcr_update_status(swcr, fpcr); return fpcr | swcr; } return *get_reg_addr(task, regno); } /* * Write contents of register REGNO in task TASK. */ static int put_reg(struct task_struct *task, unsigned long regno, unsigned long data) { if (regno == 63) { task_thread_info(task)->ieee_state = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK) | (data & IEEE_SW_MASK)); data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data); } *get_reg_addr(task, regno) = data; return 0; } static inline int read_int(struct task_struct *task, unsigned long addr, int * data) { int copied = access_process_vm(task, addr, data, sizeof(int), 0); return (copied == sizeof(int)) ? 0 : -EIO; } static inline int write_int(struct task_struct *task, unsigned long addr, int data) { int copied = access_process_vm(task, addr, &data, sizeof(int), 1); return (copied == sizeof(int)) ? 0 : -EIO; } /* * Set breakpoint. */ int ptrace_set_bpt(struct task_struct * child) { int displ, i, res, reg_b, nsaved = 0; unsigned int insn, op_code; unsigned long pc; pc = get_reg(child, REG_PC); res = read_int(child, pc, (int *) &insn); if (res < 0) return res; op_code = insn >> 26; if (op_code >= 0x30) { /* * It's a branch: instead of trying to figure out * whether the branch will be taken or not, we'll put * a breakpoint at either location. This is simpler, * more reliable, and probably not a whole lot slower * than the alternative approach of emulating the * branch (emulation can be tricky for fp branches). 
*/ displ = ((s32)(insn << 11)) >> 9; task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; if (displ) /* guard against unoptimized code */ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4 + displ; DBG(DBG_BPT, ("execing branch\n")); } else if (op_code == 0x1a) { reg_b = (insn >> 16) & 0x1f; task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); DBG(DBG_BPT, ("execing jump\n")); } else { task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; DBG(DBG_BPT, ("execing normal insn\n")); } /* install breakpoints: */ for (i = 0; i < nsaved; ++i) { res = read_int(child, task_thread_info(child)->bpt_addr[i], (int *) &insn); if (res < 0) return res; task_thread_info(child)->bpt_insn[i] = insn; DBG(DBG_BPT, (" -> next_pc=%lx\n", task_thread_info(child)->bpt_addr[i])); res = write_int(child, task_thread_info(child)->bpt_addr[i], BREAKINST); if (res < 0) return res; } task_thread_info(child)->bpt_nsaved = nsaved; return 0; } /* * Ensure no single-step breakpoint is pending. Returns non-zero * value if child was being single-stepped. */ int ptrace_cancel_bpt(struct task_struct * child) { int i, nsaved = task_thread_info(child)->bpt_nsaved; task_thread_info(child)->bpt_nsaved = 0; if (nsaved > 2) { printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); nsaved = 2; } for (i = 0; i < nsaved; ++i) { write_int(child, task_thread_info(child)->bpt_addr[i], task_thread_info(child)->bpt_insn[i]); } return (nsaved != 0); } void user_enable_single_step(struct task_struct *child) { /* Mark single stepping. */ task_thread_info(child)->bpt_nsaved = -1; } void user_disable_single_step(struct task_struct *child) { ptrace_cancel_bpt(child); } /* * Called by kernel/ptrace.c when detaching.. * * Make sure the single step bit is not set. 
*/ void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { unsigned long tmp; size_t copied; long ret; switch (request) { /* When I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); ret = -EIO; if (copied != sizeof(tmp)) break; force_successful_syscall_return(); ret = tmp; break; /* Read register number ADDR. */ case PTRACE_PEEKUSR: force_successful_syscall_return(); ret = get_reg(child, addr); DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret)); break; /* When I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: ret = generic_ptrace_pokedata(child, addr, data); break; case PTRACE_POKEUSR: /* write the specified register */ DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data)); ret = put_reg(child, addr, data); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage unsigned long syscall_trace_enter(void) { unsigned long ret = 0; struct pt_regs *regs = current_pt_regs(); if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(current_pt_regs())) ret = -1UL; audit_syscall_entry(regs->r0, regs->r16, regs->r17, regs->r18, regs->r19); return ret ?: current_pt_regs()->r0; } asmlinkage void syscall_trace_leave(void) { audit_syscall_exit(current_pt_regs()); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(current_pt_regs(), 0); }
gpl-2.0
syntheticpp/linux
arch/sh/kernel/hw_breakpoint.c
1735
8818
/* * arch/sh/kernel/hw_breakpoint.c * * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/percpu.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/io.h> #include <linux/clk.h> #include <asm/hw_breakpoint.h> #include <asm/mmu_context.h> #include <asm/ptrace.h> #include <asm/traps.h> /* * Stores the breakpoints currently in use on each breakpoint address * register for each cpus */ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); /* * A dummy placeholder for early accesses until the CPUs get a chance to * register their UBCs later in the boot process. */ static struct sh_ubc ubc_dummy = { .num_events = 0 }; static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy; /* * Install a perf counter breakpoint. * * We seek a free UBC channel and use it for this breakpoint. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return -EBUSY; clk_enable(sh_ubc->clk); sh_ubc->enable(info, i); return 0; } /* * Uninstall the breakpoint contained in the given counter. * * First we search the debug address register it uses and then we disable * it. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. 
*/ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (*slot == bp) { *slot = NULL; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return; sh_ubc->disable(info, i); clk_disable(sh_ubc->clk); } static int get_hbp_len(u16 hbp_len) { unsigned int len_in_bytes = 0; switch (hbp_len) { case SH_BREAKPOINT_LEN_1: len_in_bytes = 1; break; case SH_BREAKPOINT_LEN_2: len_in_bytes = 2; break; case SH_BREAKPOINT_LEN_4: len_in_bytes = 4; break; case SH_BREAKPOINT_LEN_8: len_in_bytes = 8; break; } return len_in_bytes; } /* * Check for virtual address in kernel space. */ int arch_check_bp_in_kernelspace(struct perf_event *bp) { unsigned int len; unsigned long va; struct arch_hw_breakpoint *info = counter_arch_bp(bp); va = info->address; len = get_hbp_len(info->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } int arch_bp_generic_fields(int sh_len, int sh_type, int *gen_len, int *gen_type) { /* Len */ switch (sh_len) { case SH_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; case SH_BREAKPOINT_LEN_2: *gen_len = HW_BREAKPOINT_LEN_2; break; case SH_BREAKPOINT_LEN_4: *gen_len = HW_BREAKPOINT_LEN_4; break; case SH_BREAKPOINT_LEN_8: *gen_len = HW_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; case SH_BREAKPOINT_RW: *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; break; default: return -EINVAL; } return 0; } static int arch_build_bp_info(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); info->address = bp->attr.bp_addr; /* Len */ switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: info->len = SH_BREAKPOINT_LEN_2; 
break; case HW_BREAKPOINT_LEN_4: info->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: info->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: info->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; } return 0; } /* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; ret = arch_build_bp_info(bp); if (ret) return ret; ret = -EINVAL; switch (info->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; case SH_BREAKPOINT_LEN_2: align = 1; break; case SH_BREAKPOINT_LEN_4: align = 3; break; case SH_BREAKPOINT_LEN_8: align = 7; break; default: return ret; } /* * For kernel-addresses, either the address or symbol name can be * specified. */ if (info->name) info->address = (unsigned long)kallsyms_lookup_name(info->name); /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ if (info->address & align) return -EINVAL; return 0; } /* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < sh_ubc->num_events; i++) { unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } } static int __kprobes hw_breakpoint_handler(struct die_args *args) { int cpu, i, rc = NOTIFY_STOP; struct perf_event *bp; unsigned int cmf, resume_mask; /* * Do an early return if none of the channels triggered. */ cmf = sh_ubc->triggered_mask(); if (unlikely(!cmf)) return NOTIFY_DONE; /* * By default, resume all of the active channels. */ resume_mask = sh_ubc->active_mask(); /* * Disable breakpoints during exception handling. 
*/ sh_ubc->disable_all(); cpu = get_cpu(); for (i = 0; i < sh_ubc->num_events; i++) { unsigned long event_mask = (1 << i); if (likely(!(cmf & event_mask))) continue; /* * The counter may be concurrently released but that can only * occur from a call_rcu() path. We can then safely fetch * the breakpoint, use its callback, touch its counter * while we are in an rcu_read_lock() path. */ rcu_read_lock(); bp = per_cpu(bp_per_reg[i], cpu); if (bp) rc = NOTIFY_DONE; /* * Reset the condition match flag to denote completion of * exception handling. */ sh_ubc->clear_triggered_mask(event_mask); /* * bp can be NULL due to concurrent perf counter * removing. */ if (!bp) { rcu_read_unlock(); break; } /* * Don't restore the channel if the breakpoint is from * ptrace, as it always operates in one-shot mode. */ if (bp->overflow_handler == ptrace_triggered) resume_mask &= ~(1 << i); perf_bp_event(bp, args->regs); /* Deliver the signal to userspace */ if (!arch_check_bp_in_kernelspace(bp)) { siginfo_t info; info.si_signo = args->signr; info.si_errno = notifier_to_errno(rc); info.si_code = TRAP_HWBKPT; force_sig_info(args->signr, &info, current); } rcu_read_unlock(); } if (cmf == 0) rc = NOTIFY_DONE; sh_ubc->enable_all(resume_mask); put_cpu(); return rc; } BUILD_TRAP_HANDLER(breakpoint) { unsigned long ex = lookup_exception_vector(); TRAP_HANDLER_DECL; notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP); } /* * Handle debug exception notifications. */ int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { struct die_args *args = data; if (val != DIE_BREAKPOINT) return NOTIFY_DONE; /* * If the breakpoint hasn't been triggered by the UBC, it's * probably from a debugger, so don't do anything more here. * * This also permits the UBC interface clock to remain off for * non-UBC breakpoints, as we don't need to check the triggered * or active channel masks. 
*/ if (args->trapnr != sh_ubc->trap_nr) return NOTIFY_DONE; return hw_breakpoint_handler(data); } void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ } int register_sh_ubc(struct sh_ubc *ubc) { /* Bail if it's already assigned */ if (sh_ubc != &ubc_dummy) return -EBUSY; sh_ubc = ubc; pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); WARN_ON(ubc->num_events > HBP_NUM); return 0; }
gpl-2.0
TheSSJ/android_kernel_asus_moorefield
arch/sparc/kernel/nmi.c
2503
6314
/* Pseudo NMI support on sparc64 systems. * * Copyright (C) 2009 David S. Miller <davem@davemloft.net> * * The NMI watchdog support and infrastructure is based almost * entirely upon the x86 NMI support code. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/nmi.h> #include <linux/export.h> #include <linux/kprobes.h> #include <linux/kernel_stat.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/kdebug.h> #include <linux/delay.h> #include <linux/smp.h> #include <asm/perf_event.h> #include <asm/ptrace.h> #include <asm/pcr.h> #include "kstack.h" /* We don't have a real NMI on sparc64, but we can fake one * up using profiling counter overflow interrupts and interrupt * levels. * * The profile overflow interrupts at level 15, so we use * level 14 as our IRQ off level. */ static int panic_on_timeout; /* nmi_active: * >0: the NMI watchdog is active, but can be disabled * <0: the NMI watchdog has not been set up, and cannot be enabled * 0: the NMI watchdog is disabled, but can be enabled */ atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ EXPORT_SYMBOL(nmi_active); static unsigned int nmi_hz = HZ; static DEFINE_PER_CPU(short, wd_enabled); static int endflag __initdata; static DEFINE_PER_CPU(unsigned int, last_irq_sum); static DEFINE_PER_CPU(long, alert_counter); static DEFINE_PER_CPU(int, nmi_touch); void touch_nmi_watchdog(void) { if (atomic_read(&nmi_active)) { int cpu; for_each_present_cpu(cpu) { if (per_cpu(nmi_touch, cpu) != 1) per_cpu(nmi_touch, cpu) = 1; } } touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) { if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) return; console_verbose(); bust_spinlocks(1); printk(KERN_EMERG "%s", str); printk(" on CPU%d, ip %08lx, registers:\n", smp_processor_id(), regs->tpc); show_regs(regs); 
dump_stack(); bust_spinlocks(0); if (do_panic || panic_on_oops) panic("Non maskable interrupt"); nmi_exit(); local_irq_enable(); do_exit(SIGBUS); } notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) { unsigned int sum, touched = 0; void *orig_sp; clear_softint(1 << irq); local_cpu_data().__nmi_count++; nmi_enter(); orig_sp = set_hardirq_stack(); if (notify_die(DIE_NMI, "nmi", regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) touched = 1; else pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); sum = local_cpu_data().irq0_irqs; if (__get_cpu_var(nmi_touch)) { __get_cpu_var(nmi_touch) = 0; touched = 1; } if (!touched && __get_cpu_var(last_irq_sum) == sum) { __this_cpu_inc(alert_counter); if (__this_cpu_read(alert_counter) == 30 * nmi_hz) die_nmi("BUG: NMI Watchdog detected LOCKUP", regs, panic_on_timeout); } else { __get_cpu_var(last_irq_sum) = sum; __this_cpu_write(alert_counter, 0); } if (__get_cpu_var(wd_enabled)) { pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } restore_hardirq_stack(orig_sp); nmi_exit(); } static inline unsigned int get_nmi_count(int cpu) { return cpu_data(cpu).__nmi_count; } static __init void nmi_cpu_busy(void *data) { local_irq_enable_in_hardirq(); while (endflag == 0) mb(); } static void report_broken_nmi(int cpu, int *prev_nmi_count) { printk(KERN_CONT "\n"); printk(KERN_WARNING "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n", cpu, prev_nmi_count[cpu], get_nmi_count(cpu)); printk(KERN_WARNING "Please report this to bugzilla.kernel.org,\n"); printk(KERN_WARNING "and attach the output of the 'dmesg' command.\n"); per_cpu(wd_enabled, cpu) = 0; atomic_dec(&nmi_active); } void stop_nmi_watchdog(void *unused) { pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); __get_cpu_var(wd_enabled) = 0; atomic_dec(&nmi_active); } static int __init check_nmi_watchdog(void) { unsigned int *prev_nmi_count; int cpu, err; if (!atomic_read(&nmi_active)) return 0; prev_nmi_count = 
kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); if (!prev_nmi_count) { err = -ENOMEM; goto error; } printk(KERN_INFO "Testing NMI watchdog ... "); smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); for_each_possible_cpu(cpu) prev_nmi_count[cpu] = get_nmi_count(cpu); local_irq_enable(); mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ for_each_online_cpu(cpu) { if (!per_cpu(wd_enabled, cpu)) continue; if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) report_broken_nmi(cpu, prev_nmi_count); } endflag = 1; if (!atomic_read(&nmi_active)) { kfree(prev_nmi_count); atomic_set(&nmi_active, -1); err = -ENODEV; goto error; } printk("OK.\n"); nmi_hz = 1; kfree(prev_nmi_count); return 0; error: on_each_cpu(stop_nmi_watchdog, NULL, 1); return err; } void start_nmi_watchdog(void *unused) { __get_cpu_var(wd_enabled) = 1; atomic_inc(&nmi_active); pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } static void nmi_adjust_hz_one(void *unused) { if (!__get_cpu_var(wd_enabled)) return; pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } void nmi_adjust_hz(unsigned int new_hz) { nmi_hz = new_hz; on_each_cpu(nmi_adjust_hz_one, NULL, 1); } EXPORT_SYMBOL_GPL(nmi_adjust_hz); static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) { on_each_cpu(stop_nmi_watchdog, NULL, 1); return 0; } static struct notifier_block nmi_reboot_notifier = { .notifier_call = nmi_shutdown, }; int __init nmi_init(void) { int err; on_each_cpu(start_nmi_watchdog, NULL, 1); err = check_nmi_watchdog(); if (!err) { err = register_reboot_notifier(&nmi_reboot_notifier); if (err) { on_each_cpu(stop_nmi_watchdog, NULL, 1); atomic_set(&nmi_active, -1); } } return err; } static int __init setup_nmi_watchdog(char *str) { if (!strncmp(str, "panic", 5)) panic_on_timeout = 1; return 0; } 
__setup("nmi_watchdog=", setup_nmi_watchdog);
gpl-2.0
shizhai/wprobe
build_dir/target-mips_r2_uClibc-0.9.33.2/linux-ar71xx_generic/linux-3.10.4/arch/sparc/prom/init_64.c
2759
1368
/* * init.c: Initialize internal variables used by the PROM * library functions. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/ctype.h> #include <asm/openprom.h> #include <asm/oplib.h> /* OBP version string. */ char prom_version[80]; /* The root node of the prom device tree. */ int prom_stdout; phandle prom_chosen_node; /* You must call prom_init() before you attempt to use any of the * routines in the prom library. * It gets passed the pointer to the PROM vector. */ extern void prom_cif_init(void *, void *); void __init prom_init(void *cif_handler, void *cif_stack) { phandle node; prom_cif_init(cif_handler, cif_stack); prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || (s32)prom_chosen_node == -1) prom_halt(); prom_stdout = prom_getint(prom_chosen_node, "stdout"); node = prom_finddevice("/openprom"); if (!node || (s32)node == -1) prom_halt(); prom_getstring(node, "version", prom_version, sizeof(prom_version)); prom_printf("\n"); } void __init prom_init_report(void) { printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version); printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); }
gpl-2.0
savoca/otus
drivers/staging/comedi/drivers/cb_pcidas64.c
4295
125437
/* comedi/drivers/cb_pcidas64.c This is a driver for the ComputerBoards/MeasurementComputing PCI-DAS 64xx, 60xx, and 4020 cards. Author: Frank Mori Hess <fmhess@users.sourceforge.net> Copyright (C) 2001, 2002 Frank Mori Hess Thanks also go to the following people: Steve Rosenbluth, for providing the source code for his pci-das6402 driver, and source code for working QNX pci-6402 drivers by Greg Laird and Mariusz Bogacz. None of the code was used directly here, but it was useful as an additional source of documentation on how to program the boards. John Sims, for much testing and feedback on pcidas-4020 support. COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
************************************************************************/ /* Driver: cb_pcidas64 Description: MeasurementComputing PCI-DAS64xx, 60XX, and 4020 series with the PLX 9080 PCI controller Author: Frank Mori Hess <fmhess@users.sourceforge.net> Status: works Updated: 2002-10-09 Devices: [Measurement Computing] PCI-DAS6402/16 (cb_pcidas64), PCI-DAS6402/12, PCI-DAS64/M1/16, PCI-DAS64/M2/16, PCI-DAS64/M3/16, PCI-DAS6402/16/JR, PCI-DAS64/M1/16/JR, PCI-DAS64/M2/16/JR, PCI-DAS64/M3/16/JR, PCI-DAS64/M1/14, PCI-DAS64/M2/14, PCI-DAS64/M3/14, PCI-DAS6013, PCI-DAS6014, PCI-DAS6023, PCI-DAS6025, PCI-DAS6030, PCI-DAS6031, PCI-DAS6032, PCI-DAS6033, PCI-DAS6034, PCI-DAS6035, PCI-DAS6036, PCI-DAS6040, PCI-DAS6052, PCI-DAS6070, PCI-DAS6071, PCI-DAS4020/12 Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) These boards may be autocalibrated with the comedi_calibrate utility. To select the bnc trigger input on the 4020 (instead of the dio input), specify a nonzero channel in the chanspec. If you wish to use an external master clock on the 4020, you may do so by setting the scan_begin_src to TRIG_OTHER, and using an INSN_CONFIG_TIMER_1 configuration insn to configure the divisor to use for the external clock. Some devices are not identified because the PCI device IDs are not yet known. If you have such a board, please file a bug report at https://bugs.comedi.org. */ /* TODO: make it return error if user attempts an ai command that uses the external queue, and an ao command simultaneously user counter subdevice there are a number of boards this driver will support when they are fully released, but does not yet since the pci device id numbers are not yet available. support prescaled 100khz clock for slow pacing (not available on 6000 series?) 
make ao fifo size adjustable like ai fifo */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "comedi_pci.h" #include "8253.h" #include "8255.h" #include "plx9080.h" #include "comedi_fc.h" #undef PCIDAS64_DEBUG /* disable debugging code */ /* #define PCIDAS64_DEBUG enable debugging code */ #ifdef PCIDAS64_DEBUG #define DEBUG_PRINT(format, args...) printk(format , ## args) #else #define DEBUG_PRINT(format, args...) #endif #define TIMER_BASE 25 /* 40MHz master clock */ #define PRESCALED_TIMER_BASE 10000 /* 100kHz 'prescaled' clock for slow acquisition, maybe I'll support this someday */ #define DMA_BUFFER_SIZE 0x1000 #define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307 /* maximum value that can be loaded into board's 24-bit counters*/ static const int max_counter_value = 0xffffff; /* PCI-DAS64xxx base addresses */ /* indices of base address regions */ enum base_address_regions { PLX9080_BADDRINDEX = 0, MAIN_BADDRINDEX = 2, DIO_COUNTER_BADDRINDEX = 3, }; /* priv(dev)->main_iobase registers */ enum write_only_registers { INTR_ENABLE_REG = 0x0, /* interrupt enable register */ HW_CONFIG_REG = 0x2, /* hardware config register */ DAQ_SYNC_REG = 0xc, DAQ_ATRIG_LOW_4020_REG = 0xc, ADC_CONTROL0_REG = 0x10, /* adc control register 0 */ ADC_CONTROL1_REG = 0x12, /* adc control register 1 */ CALIBRATION_REG = 0x14, ADC_SAMPLE_INTERVAL_LOWER_REG = 0x16, /* lower 16 bits of adc sample interval counter */ ADC_SAMPLE_INTERVAL_UPPER_REG = 0x18, /* upper 8 bits of adc sample interval counter */ ADC_DELAY_INTERVAL_LOWER_REG = 0x1a, /* lower 16 bits of delay interval counter */ ADC_DELAY_INTERVAL_UPPER_REG = 0x1c, /* upper 8 bits of delay interval counter */ ADC_COUNT_LOWER_REG = 0x1e, /* lower 16 bits of hardware conversion/scan counter */ ADC_COUNT_UPPER_REG = 0x20, /* upper 8 bits of hardware conversion/scan counter */ ADC_START_REG = 0x22, /* software trigger to start acquisition */ ADC_CONVERT_REG = 0x24, /* initiates single conversion */ 
ADC_QUEUE_CLEAR_REG = 0x26, /* clears adc queue */ ADC_QUEUE_LOAD_REG = 0x28, /* loads adc queue */ ADC_BUFFER_CLEAR_REG = 0x2a, ADC_QUEUE_HIGH_REG = 0x2c, /* high channel for internal queue, use adc_chan_bits() inline above */ DAC_CONTROL0_REG = 0x50, /* dac control register 0 */ DAC_CONTROL1_REG = 0x52, /* dac control register 0 */ DAC_SAMPLE_INTERVAL_LOWER_REG = 0x54, /* lower 16 bits of dac sample interval counter */ DAC_SAMPLE_INTERVAL_UPPER_REG = 0x56, /* upper 8 bits of dac sample interval counter */ DAC_SELECT_REG = 0x60, DAC_START_REG = 0x64, DAC_BUFFER_CLEAR_REG = 0x66, /* clear dac buffer */ }; static inline unsigned int dac_convert_reg(unsigned int channel) { return 0x70 + (2 * (channel & 0x1)); } static inline unsigned int dac_lsb_4020_reg(unsigned int channel) { return 0x70 + (4 * (channel & 0x1)); } static inline unsigned int dac_msb_4020_reg(unsigned int channel) { return 0x72 + (4 * (channel & 0x1)); } enum read_only_registers { HW_STATUS_REG = 0x0, /* hardware status register, reading this apparently clears pending interrupts as well */ PIPE1_READ_REG = 0x4, ADC_READ_PNTR_REG = 0x8, LOWER_XFER_REG = 0x10, ADC_WRITE_PNTR_REG = 0xc, PREPOST_REG = 0x14, }; enum read_write_registers { I8255_4020_REG = 0x48, /* 8255 offset, for 4020 only */ ADC_QUEUE_FIFO_REG = 0x100, /* external channel/gain queue, uses same bits as ADC_QUEUE_LOAD_REG */ ADC_FIFO_REG = 0x200, /* adc data fifo */ DAC_FIFO_REG = 0x300, /* dac data fifo, has weird interactions with external channel queue */ }; /* priv(dev)->dio_counter_iobase registers */ enum dio_counter_registers { DIO_8255_OFFSET = 0x0, DO_REG = 0x20, DI_REG = 0x28, DIO_DIRECTION_60XX_REG = 0x40, DIO_DATA_60XX_REG = 0x48, }; /* bit definitions for write-only registers */ enum intr_enable_contents { ADC_INTR_SRC_MASK = 0x3, /* bits that set adc interrupt source */ ADC_INTR_QFULL_BITS = 0x0, /* interrupt fifo quater full */ ADC_INTR_EOC_BITS = 0x1, /* interrupt end of conversion */ ADC_INTR_EOSCAN_BITS = 0x2, /* 
interrupt end of scan */ ADC_INTR_EOSEQ_BITS = 0x3, /* interrupt end of sequence (probably wont use this it's pretty fancy) */ EN_ADC_INTR_SRC_BIT = 0x4, /* enable adc interrupt source */ EN_ADC_DONE_INTR_BIT = 0x8, /* enable adc acquisition done interrupt */ DAC_INTR_SRC_MASK = 0x30, DAC_INTR_QEMPTY_BITS = 0x0, DAC_INTR_HIGH_CHAN_BITS = 0x10, EN_DAC_INTR_SRC_BIT = 0x40, /* enable dac interrupt source */ EN_DAC_DONE_INTR_BIT = 0x80, EN_ADC_ACTIVE_INTR_BIT = 0x200, /* enable adc active interrupt */ EN_ADC_STOP_INTR_BIT = 0x400, /* enable adc stop trigger interrupt */ EN_DAC_ACTIVE_INTR_BIT = 0x800, /* enable dac active interrupt */ EN_DAC_UNDERRUN_BIT = 0x4000, /* enable dac underrun status bit */ EN_ADC_OVERRUN_BIT = 0x8000, /* enable adc overrun status bit */ }; enum hw_config_contents { MASTER_CLOCK_4020_MASK = 0x3, /* bits that specify master clock source for 4020 */ INTERNAL_CLOCK_4020_BITS = 0x1, /* use 40 MHz internal master clock for 4020 */ BNC_CLOCK_4020_BITS = 0x2, /* use BNC input for master clock */ EXT_CLOCK_4020_BITS = 0x3, /* use dio input for master clock */ EXT_QUEUE_BIT = 0x200, /* use external channel/gain queue (more versatile than internal queue) */ SLOW_DAC_BIT = 0x400, /* use 225 nanosec strobe when loading dac instead of 50 nanosec */ HW_CONFIG_DUMMY_BITS = 0x2000, /* bit with unknown function yet given as default value in pci-das64 manual */ DMA_CH_SELECT_BIT = 0x8000, /* bit selects channels 1/0 for analog input/output, otherwise 0/1 */ FIFO_SIZE_REG = 0x4, /* allows adjustment of fifo sizes */ DAC_FIFO_SIZE_MASK = 0xff00, /* bits that set dac fifo size */ DAC_FIFO_BITS = 0xf800, /* 8k sample ao fifo */ }; #define DAC_FIFO_SIZE 0x2000 enum daq_atrig_low_4020_contents { EXT_AGATE_BNC_BIT = 0x8000, /* use trig/ext clk bnc input for analog gate signal */ EXT_STOP_TRIG_BNC_BIT = 0x4000, /* use trig/ext clk bnc input for external stop trigger signal */ EXT_START_TRIG_BNC_BIT = 0x2000, /* use trig/ext clk bnc input for external start trigger 
signal */ }; static inline uint16_t analog_trig_low_threshold_bits(uint16_t threshold) { return threshold & 0xfff; } enum adc_control0_contents { ADC_GATE_SRC_MASK = 0x3, /* bits that select gate */ ADC_SOFT_GATE_BITS = 0x1, /* software gate */ ADC_EXT_GATE_BITS = 0x2, /* external digital gate */ ADC_ANALOG_GATE_BITS = 0x3, /* analog level gate */ ADC_GATE_LEVEL_BIT = 0x4, /* level-sensitive gate (for digital) */ ADC_GATE_POLARITY_BIT = 0x8, /* gate active low */ ADC_START_TRIG_SOFT_BITS = 0x10, ADC_START_TRIG_EXT_BITS = 0x20, ADC_START_TRIG_ANALOG_BITS = 0x30, ADC_START_TRIG_MASK = 0x30, ADC_START_TRIG_FALLING_BIT = 0x40, /* trig 1 uses falling edge */ ADC_EXT_CONV_FALLING_BIT = 0x800, /* external pacing uses falling edge */ ADC_SAMPLE_COUNTER_EN_BIT = 0x1000, /* enable hardware scan counter */ ADC_DMA_DISABLE_BIT = 0x4000, /* disables dma */ ADC_ENABLE_BIT = 0x8000, /* master adc enable */ }; enum adc_control1_contents { ADC_QUEUE_CONFIG_BIT = 0x1, /* should be set for boards with > 16 channels */ CONVERT_POLARITY_BIT = 0x10, EOC_POLARITY_BIT = 0x20, ADC_SW_GATE_BIT = 0x40, /* software gate of adc */ ADC_DITHER_BIT = 0x200, /* turn on extra noise for dithering */ RETRIGGER_BIT = 0x800, ADC_LO_CHANNEL_4020_MASK = 0x300, ADC_HI_CHANNEL_4020_MASK = 0xc00, TWO_CHANNEL_4020_BITS = 0x1000, /* two channel mode for 4020 */ FOUR_CHANNEL_4020_BITS = 0x2000, /* four channel mode for 4020 */ CHANNEL_MODE_4020_MASK = 0x3000, ADC_MODE_MASK = 0xf000, }; static inline uint16_t adc_lo_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 8; }; static inline uint16_t adc_hi_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 10; }; static inline uint16_t adc_mode_bits(unsigned int mode) { return (mode & 0xf) << 12; }; enum calibration_contents { SELECT_8800_BIT = 0x1, SELECT_8402_64XX_BIT = 0x2, SELECT_1590_60XX_BIT = 0x2, CAL_EN_64XX_BIT = 0x40, /* calibration enable for 64xx series */ SERIAL_DATA_IN_BIT = 0x80, SERIAL_CLOCK_BIT = 0x100, CAL_EN_60XX_BIT = 
0x200, /* calibration enable for 60xx series */ CAL_GAIN_BIT = 0x800, }; /* calibration sources for 6025 are: * 0 : ground * 1 : 10V * 2 : 5V * 3 : 0.5V * 4 : 0.05V * 5 : ground * 6 : dac channel 0 * 7 : dac channel 1 */ static inline uint16_t adc_src_bits(unsigned int source) { return (source & 0xf) << 3; }; static inline uint16_t adc_convert_chan_4020_bits(unsigned int channel) { return (channel & 0x3) << 8; }; enum adc_queue_load_contents { UNIP_BIT = 0x800, /* unipolar/bipolar bit */ ADC_SE_DIFF_BIT = 0x1000, /* single-ended/ differential bit */ ADC_COMMON_BIT = 0x2000, /* non-referenced single-ended (common-mode input) */ QUEUE_EOSEQ_BIT = 0x4000, /* queue end of sequence */ QUEUE_EOSCAN_BIT = 0x8000, /* queue end of scan */ }; static inline uint16_t adc_chan_bits(unsigned int channel) { return channel & 0x3f; }; enum dac_control0_contents { DAC_ENABLE_BIT = 0x8000, /* dac controller enable bit */ DAC_CYCLIC_STOP_BIT = 0x4000, DAC_WAVEFORM_MODE_BIT = 0x100, DAC_EXT_UPDATE_FALLING_BIT = 0x80, DAC_EXT_UPDATE_ENABLE_BIT = 0x40, WAVEFORM_TRIG_MASK = 0x30, WAVEFORM_TRIG_DISABLED_BITS = 0x0, WAVEFORM_TRIG_SOFT_BITS = 0x10, WAVEFORM_TRIG_EXT_BITS = 0x20, WAVEFORM_TRIG_ADC1_BITS = 0x30, WAVEFORM_TRIG_FALLING_BIT = 0x8, WAVEFORM_GATE_LEVEL_BIT = 0x4, WAVEFORM_GATE_ENABLE_BIT = 0x2, WAVEFORM_GATE_SELECT_BIT = 0x1, }; enum dac_control1_contents { DAC_WRITE_POLARITY_BIT = 0x800, /* board-dependent setting */ DAC1_EXT_REF_BIT = 0x200, DAC0_EXT_REF_BIT = 0x100, DAC_OUTPUT_ENABLE_BIT = 0x80, /* dac output enable bit */ DAC_UPDATE_POLARITY_BIT = 0x40, /* board-dependent setting */ DAC_SW_GATE_BIT = 0x20, DAC1_UNIPOLAR_BIT = 0x8, DAC0_UNIPOLAR_BIT = 0x2, }; /* bit definitions for read-only registers */ enum hw_status_contents { DAC_UNDERRUN_BIT = 0x1, ADC_OVERRUN_BIT = 0x2, DAC_ACTIVE_BIT = 0x4, ADC_ACTIVE_BIT = 0x8, DAC_INTR_PENDING_BIT = 0x10, ADC_INTR_PENDING_BIT = 0x20, DAC_DONE_BIT = 0x40, ADC_DONE_BIT = 0x80, EXT_INTR_PENDING_BIT = 0x100, ADC_STOP_BIT = 0x200, }; static 
inline uint16_t pipe_full_bits(uint16_t hw_status_bits) { return (hw_status_bits >> 10) & 0x3; }; static inline unsigned int dma_chain_flag_bits(uint16_t prepost_bits) { return (prepost_bits >> 6) & 0x3; } static inline unsigned int adc_upper_read_ptr_code(uint16_t prepost_bits) { return (prepost_bits >> 12) & 0x3; } static inline unsigned int adc_upper_write_ptr_code(uint16_t prepost_bits) { return (prepost_bits >> 14) & 0x3; } /* I2C addresses for 4020 */ enum i2c_addresses { RANGE_CAL_I2C_ADDR = 0x20, CALDAC0_I2C_ADDR = 0xc, CALDAC1_I2C_ADDR = 0xd, }; enum range_cal_i2c_contents { ADC_SRC_4020_MASK = 0x70, /* bits that set what source the adc converter measures */ BNC_TRIG_THRESHOLD_0V_BIT = 0x80, /* make bnc trig/ext clock threshold 0V instead of 2.5V */ }; static inline uint8_t adc_src_4020_bits(unsigned int source) { return (source << 4) & ADC_SRC_4020_MASK; }; static inline uint8_t attenuate_bit(unsigned int channel) { /* attenuate channel (+-5V input range) */ return 1 << (channel & 0x3); }; /* analog input ranges for 64xx boards */ static const struct comedi_lrange ai_ranges_64xx = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; /* analog input ranges for 60xx boards */ static const struct comedi_lrange ai_ranges_60xx = { 4, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(0.5), BIP_RANGE(0.05), } }; /* analog input ranges for 6030, etc boards */ static const struct comedi_lrange ai_ranges_6030 = { 14, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.2), BIP_RANGE(0.1), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; /* analog input ranges for 6052, etc boards */ static const struct comedi_lrange ai_ranges_6052 = { 15, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1), BIP_RANGE(0.5), BIP_RANGE(0.25), BIP_RANGE(0.1), BIP_RANGE(0.05), UNI_RANGE(10), UNI_RANGE(5), 
UNI_RANGE(2), UNI_RANGE(1), UNI_RANGE(0.5), UNI_RANGE(0.2), UNI_RANGE(0.1), } }; /* analog input ranges for 4020 board */ static const struct comedi_lrange ai_ranges_4020 = { 2, { BIP_RANGE(5), BIP_RANGE(1), } }; /* analog output ranges */ static const struct comedi_lrange ao_ranges_64xx = { 4, { BIP_RANGE(5), BIP_RANGE(10), UNI_RANGE(5), UNI_RANGE(10), } }; static const int ao_range_code_64xx[] = { 0x0, 0x1, 0x2, 0x3, }; static const struct comedi_lrange ao_ranges_60xx = { 1, { BIP_RANGE(10), } }; static const int ao_range_code_60xx[] = { 0x0, }; static const struct comedi_lrange ao_ranges_6030 = { 2, { BIP_RANGE(10), UNI_RANGE(10), } }; static const int ao_range_code_6030[] = { 0x0, 0x2, }; static const struct comedi_lrange ao_ranges_4020 = { 2, { BIP_RANGE(5), BIP_RANGE(10), } }; static const int ao_range_code_4020[] = { 0x1, 0x0, }; enum register_layout { LAYOUT_60XX, LAYOUT_64XX, LAYOUT_4020, }; struct hw_fifo_info { unsigned int num_segments; unsigned int max_segment_length; unsigned int sample_packing_ratio; uint16_t fifo_size_reg_mask; }; struct pcidas64_board { const char *name; int device_id; /* pci device id */ int ai_se_chans; /* number of ai inputs in single-ended mode */ int ai_bits; /* analog input resolution */ int ai_speed; /* fastest conversion period in ns */ const struct comedi_lrange *ai_range_table; int ao_nchan; /* number of analog out channels */ int ao_bits; /* analog output resolution */ int ao_scan_speed; /* analog output speed (for a scan, not conversion) */ const struct comedi_lrange *ao_range_table; const int *ao_range_code; const struct hw_fifo_info *const ai_fifo; enum register_layout layout; /* different board families have slightly different registers */ unsigned has_8255:1; }; static const struct hw_fifo_info ai_fifo_4020 = { .num_segments = 2, .max_segment_length = 0x8000, .sample_packing_ratio = 2, .fifo_size_reg_mask = 0x7f, }; static const struct hw_fifo_info ai_fifo_64xx = { .num_segments = 4, .max_segment_length = 0x800, 
.sample_packing_ratio = 1, .fifo_size_reg_mask = 0x3f, }; static const struct hw_fifo_info ai_fifo_60xx = { .num_segments = 4, .max_segment_length = 0x800, .sample_packing_ratio = 1, .fifo_size_reg_mask = 0x7f, }; /* maximum number of dma transfers we will chain together into a ring * (and the maximum number of dma buffers we maintain) */ #define MAX_AI_DMA_RING_COUNT (0x80000 / DMA_BUFFER_SIZE) #define MIN_AI_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE) #define AO_DMA_RING_COUNT (0x10000 / DMA_BUFFER_SIZE) static inline unsigned int ai_dma_ring_count(struct pcidas64_board *board) { if (board->layout == LAYOUT_4020) return MAX_AI_DMA_RING_COUNT; else return MIN_AI_DMA_RING_COUNT; } static const int bytes_in_sample = 2; static const struct pcidas64_board pcidas64_boards[] = { { .name = "pci-das6402/16", .device_id = 0x1d, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das6402/12", /* XXX check */ .device_id = 0x1e, .ai_se_chans = 64, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/16", .device_id = 0x35, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 1000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/16", .device_id = 0x36, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 500, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, 
.ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/16", .device_id = 0x37, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 333, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ao_range_table = &ao_ranges_64xx, .ao_range_code = ao_range_code_64xx, .ai_fifo = &ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das6013", .device_id = 0x78, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_bits = 16, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6014", .device_id = 0x79, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6023", .device_id = 0x5d, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 1, }, { .name = "pci-das6025", .device_id = 0x5e, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 1, }, { .name = "pci-das6030", .device_id = 0x5f, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = 
&ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6031", .device_id = 0x60, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 10000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6032", .device_id = 0x61, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6033", .device_id = 0x62, .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 10000, .ao_nchan = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6034", .device_id = 0x63, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 0, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6035", .device_id = 0x64, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6036", .device_id = 0x6f, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 100000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_60xx, .ao_range_table = &ao_ranges_60xx, .ao_range_code = ao_range_code_60xx, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6040", .device_id = 0x65, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 2000, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name 
= "pci-das6052", .device_id = 0x66, .ai_se_chans = 16, .ai_bits = 16, .ai_speed = 3333, .ao_nchan = 2, .ao_bits = 16, .ao_scan_speed = 3333, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6070", .device_id = 0x67, .ai_se_chans = 16, .ai_bits = 12, .ai_speed = 800, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das6071", .device_id = 0x68, .ai_se_chans = 64, .ai_bits = 12, .ai_speed = 800, .ao_nchan = 2, .ao_bits = 12, .ao_scan_speed = 1000, .layout = LAYOUT_60XX, .ai_range_table = &ai_ranges_6052, .ao_range_table = &ao_ranges_6030, .ao_range_code = ao_range_code_6030, .ai_fifo = &ai_fifo_60xx, .has_8255 = 0, }, { .name = "pci-das4020/12", .device_id = 0x52, .ai_se_chans = 4, .ai_bits = 12, .ai_speed = 50, .ao_bits = 12, .ao_nchan = 2, .ao_scan_speed = 0, /* no hardware pacing on ao */ .layout = LAYOUT_4020, .ai_range_table = &ai_ranges_4020, .ao_range_table = &ao_ranges_4020, .ao_range_code = ao_range_code_4020, .ai_fifo = &ai_fifo_4020, .has_8255 = 1, }, #if 0 { .name = "pci-das6402/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 1000, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 500, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, 
.ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/16/jr", .device_id = 0 /* XXX, */ .ai_se_chans = 64, .ai_bits = 16, .ai_speed = 333, .ao_nchan = 0, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m1/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 1000, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m2/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 500, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, { .name = "pci-das64/m3/14", .device_id = 0, /* XXX */ .ai_se_chans = 64, .ai_bits = 14, .ai_speed = 333, .ao_nchan = 2, .ao_scan_speed = 10000, .layout = LAYOUT_64XX, .ai_range_table = &ai_ranges_64xx, .ai_fifo = ai_fifo_64xx, .has_8255 = 1, }, #endif }; static DEFINE_PCI_DEVICE_TABLE(pcidas64_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x001d) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x001e) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0035) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0036) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0037) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0052) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x005d) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x005e) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x005f) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0061) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0062) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0063) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0064) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0066) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0067) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0068) }, { 
PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x006f) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0078) }, { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, 0x0079) }, { 0 } }; MODULE_DEVICE_TABLE(pci, pcidas64_pci_table); static inline struct pcidas64_board *board(const struct comedi_device *dev) { return (struct pcidas64_board *)dev->board_ptr; } static inline unsigned short se_diff_bit_6xxx(struct comedi_device *dev, int use_differential) { if ((board(dev)->layout == LAYOUT_64XX && !use_differential) || (board(dev)->layout == LAYOUT_60XX && use_differential)) return ADC_SE_DIFF_BIT; else return 0; }; struct ext_clock_info { unsigned int divisor; /* master clock divisor to use for scans with external master clock */ unsigned int chanspec; /* chanspec for master clock input when used as scan begin src */ }; /* this structure is for data unique to this hardware driver. */ struct pcidas64_private { struct pci_dev *hw_dev; /* pointer to board's pci_dev struct */ /* base addresses (physical) */ resource_size_t plx9080_phys_iobase; resource_size_t main_phys_iobase; resource_size_t dio_counter_phys_iobase; /* base addresses (ioremapped) */ void __iomem *plx9080_iobase; void __iomem *main_iobase; void __iomem *dio_counter_iobase; /* local address (used by dma controller) */ uint32_t local0_iobase; uint32_t local1_iobase; volatile unsigned int ai_count; /* number of analog input samples remaining */ uint16_t *ai_buffer[MAX_AI_DMA_RING_COUNT]; /* dma buffers for analog input */ dma_addr_t ai_buffer_bus_addr[MAX_AI_DMA_RING_COUNT]; /* physical addresses of ai dma buffers */ struct plx_dma_desc *ai_dma_desc; /* array of ai dma descriptors read by plx9080, allocated to get proper alignment */ dma_addr_t ai_dma_desc_bus_addr; /* physical address of ai dma descriptor array */ volatile unsigned int ai_dma_index; /* index of the ai dma descriptor/buffer that is currently being used */ uint16_t *ao_buffer[AO_DMA_RING_COUNT]; /* dma buffers for analog output */ dma_addr_t 
ao_buffer_bus_addr[AO_DMA_RING_COUNT]; /* physical addresses of ao dma buffers */ struct plx_dma_desc *ao_dma_desc; dma_addr_t ao_dma_desc_bus_addr; volatile unsigned int ao_dma_index; /* keeps track of buffer where the next ao sample should go */ volatile unsigned long ao_count; /* number of analog output samples remaining */ volatile unsigned int ao_value[2]; /* remember what the analog outputs are set to, to allow readback */ unsigned int hw_revision; /* stc chip hardware revision number */ volatile unsigned int intr_enable_bits; /* last bits sent to INTR_ENABLE_REG register */ volatile uint16_t adc_control1_bits; /* last bits sent to ADC_CONTROL1_REG register */ volatile uint16_t fifo_size_bits; /* last bits sent to FIFO_SIZE_REG register */ volatile uint16_t hw_config_bits; /* last bits sent to HW_CONFIG_REG register */ volatile uint16_t dac_control1_bits; volatile uint32_t plx_control_bits; /* last bits written to plx9080 control register */ volatile uint32_t plx_intcsr_bits; /* last bits written to plx interrupt control and status register */ volatile int calibration_source; /* index of calibration source readable through ai ch0 */ volatile uint8_t i2c_cal_range_bits; /* bits written to i2c calibration/range register */ volatile unsigned int ext_trig_falling; /* configure digital triggers to trigger on falling edge */ /* states of various devices stored to enable read-back */ unsigned int ad8402_state[2]; unsigned int caldac_state[8]; volatile short ai_cmd_running; unsigned int ai_fifo_segment_length; struct ext_clock_info ext_clock; short ao_bounce_buffer[DAC_FIFO_SIZE]; }; /* inline function that makes it easier to * access the private structure. */ static inline struct pcidas64_private *priv(struct comedi_device *dev) { return dev->private; } /* * The comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. 
*/ static int attach(struct comedi_device *dev, struct comedi_devconfig *it); static int detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidas = { .driver_name = "cb_pcidas64", .module = THIS_MODULE, .attach = attach, .detach = detach, }; static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *subdev, unsigned int trig_num); static int ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static irqreturn_t handle_interrupt(int irq, void *d); static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int dio_callback(int dir, int port, int data, unsigned long arg); static int dio_callback_4020(int dir, int port, int data, unsigned long arg); static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dio_60xx_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dio_60xx_wbits(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int calib_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int calib_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ad8402_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void ad8402_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int ad8402_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void check_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); static unsigned int get_divisor(unsigned int ns, unsigned int flags); static void i2c_write(struct comedi_device *dev, unsigned int address, const uint8_t * data, unsigned int length); static void caldac_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value); /* static int dac_1590_write(struct comedi_device *dev, unsigned int dac_a, unsigned int dac_b); */ static int caldac_i2c_write(struct comedi_device *dev, unsigned int caldac_channel, unsigned int value); static void abort_dma(struct comedi_device *dev, unsigned int channel); static void disable_plx_interrupts(struct comedi_device *dev); static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples); static unsigned int ai_fifo_size(struct comedi_device *dev); static int set_ai_fifo_segment_length(struct comedi_device *dev, unsigned int num_entries); static void disable_ai_pacing(struct comedi_device *dev); static void disable_ai_interrupts(struct comedi_device *dev); static 
void enable_ai_interrupts(struct comedi_device *dev,
			  const struct comedi_cmd *cmd);
static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags);
static void load_ao_dma(struct comedi_device *dev,
			const struct comedi_cmd *cmd);

/* PCI glue: auto-configure/unconfigure the comedi device on PCI bind. */
static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev,
						const struct pci_device_id
						*ent)
{
	return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name);
}

static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_cb_pcidas_pci_driver = {
	.id_table = pcidas64_pci_table,
	.probe = &driver_cb_pcidas_pci_probe,
	.remove = __devexit_p(&driver_cb_pcidas_pci_remove)
};

/* Register with the comedi core first, then with the PCI core. */
static int __init driver_cb_pcidas_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_cb_pcidas);
	if (retval < 0)
		return retval;

	driver_cb_pcidas_pci_driver.name =
	    (char *)driver_cb_pcidas.driver_name;
	return pci_register_driver(&driver_cb_pcidas_pci_driver);
}

static void __exit driver_cb_pcidas_cleanup_module(void)
{
	pci_unregister_driver(&driver_cb_pcidas_pci_driver);
	comedi_driver_unregister(&driver_cb_pcidas);
}

module_init(driver_cb_pcidas_init_module);
module_exit(driver_cb_pcidas_cleanup_module);

/*
 * Translate an analog input range index into the gain bits used by the
 * 6xxx boards' adc queue registers.  The gain code is selected by the
 * range's full-scale maximum; unipolar ranges get an extra offset.
 */
static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
				       unsigned int range_index)
{
	const struct comedi_krange *range =
	    &board(dev)->ai_range_table->range[range_index];
	unsigned int bits = 0;

	switch (range->max) {
	case 10000000:
		bits = 0x000;
		break;
	case 5000000:
		bits = 0x100;
		break;
	case 2000000:
	case 2500000:
		bits = 0x200;
		break;
	case 1000000:
	case 1250000:
		bits = 0x300;
		break;
	case 500000:
		bits = 0x400;
		break;
	case 200000:
	case 250000:
		bits = 0x500;
		break;
	case 100000:
		bits = 0x600;
		break;
	case 50000:
		bits = 0x700;
		break;
	default:
		comedi_error(dev, "bug! in ai_range_bits_6xxx");
		break;
	}
	/* unipolar ranges (min == 0) get an additional offset */
	if (range->min == 0)
		bits += 0x900;
	return bits;
}

/* Extract the hardware revision field from the hardware status register. */
static unsigned int hw_revision(const struct comedi_device *dev,
				uint16_t hw_status_bits)
{
	if (board(dev)->layout == LAYOUT_4020)
		return (hw_status_bits >> 13) & 0x7;

	return (hw_status_bits >> 12) & 0xf;
}

/* Update the 2-bit range code for analog output channel 'channel' in *bits. */
static void set_dac_range_bits(struct comedi_device *dev,
			       volatile uint16_t * bits, unsigned int channel,
			       unsigned int range)
{
	unsigned int code = board(dev)->ao_range_code[range];

	if (channel > 1)
		comedi_error(dev, "bug! bad channel?");
	if (code & ~0x3)
		comedi_error(dev, "bug! bad range code?");

	*bits &= ~(0x3 << (2 * channel));
	*bits |= code << (2 * channel);
};	/* NOTE(review): stray ';' after function body — harmless, but could be dropped */

static inline int ao_cmd_is_supported(const struct pcidas64_board *board)
{
	return board->ao_nchan && board->layout != LAYOUT_4020;
}

/* initialize plx9080 chip */
static void init_plx9080(struct comedi_device *dev)
{
	uint32_t bits;
	void __iomem *plx_iobase = priv(dev)->plx9080_iobase;

	priv(dev)->plx_control_bits =
	    readl(priv(dev)->plx9080_iobase + PLX_CONTROL_REG);

	/* plx9080 register dump (only with debugging enabled) */
	DEBUG_PRINT(" plx interrupt status 0x%x\n",
		    readl(plx_iobase + PLX_INTRCS_REG));
	DEBUG_PRINT(" plx id bits 0x%x\n", readl(plx_iobase + PLX_ID_REG));
	DEBUG_PRINT(" plx control reg 0x%x\n", priv(dev)->plx_control_bits);
	DEBUG_PRINT(" plx mode/arbitration reg 0x%x\n",
		    readl(plx_iobase + PLX_MARB_REG));
	DEBUG_PRINT(" plx region0 reg 0x%x\n",
		    readl(plx_iobase + PLX_REGION0_REG));
	DEBUG_PRINT(" plx region1 reg 0x%x\n",
		    readl(plx_iobase + PLX_REGION1_REG));
	DEBUG_PRINT(" plx revision 0x%x\n",
		    readl(plx_iobase + PLX_REVISION_REG));
	DEBUG_PRINT(" plx dma channel 0 mode 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_MODE_REG));
	DEBUG_PRINT(" plx dma channel 1 mode 0x%x\n",
		    readl(plx_iobase + PLX_DMA1_MODE_REG));
	DEBUG_PRINT(" plx dma channel 0 pci address 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_PCI_ADDRESS_REG));
	DEBUG_PRINT(" plx dma channel 0 local address 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_LOCAL_ADDRESS_REG));
	DEBUG_PRINT(" plx dma channel 0 transfer size 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_TRANSFER_SIZE_REG));
	DEBUG_PRINT(" plx dma channel 0 descriptor 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_DESCRIPTOR_REG));
	DEBUG_PRINT(" plx dma channel 0 command status 0x%x\n",
		    readb(plx_iobase + PLX_DMA0_CS_REG));
	DEBUG_PRINT(" plx dma channel 0 threshold 0x%x\n",
		    readl(plx_iobase + PLX_DMA0_THRESHOLD_REG));
	DEBUG_PRINT(" plx bigend 0x%x\n", readl(plx_iobase + PLX_BIGEND_REG));

#ifdef __BIG_ENDIAN
	bits = BIGEND_DMA0 | BIGEND_DMA1;
#else
	bits = 0;
#endif
	writel(bits, priv(dev)->plx9080_iobase + PLX_BIGEND_REG);

	disable_plx_interrupts(dev);

	abort_dma(dev, 0);
	abort_dma(dev, 1);

	/* configure dma0 mode */
	bits = 0;
	/* enable ready input, not sure if this is necessary */
	bits |= PLX_DMA_EN_READYIN_BIT;
	/* enable bterm, not sure if this is necessary */
	bits |= PLX_EN_BTERM_BIT;
	/* enable dma chaining */
	bits |= PLX_EN_CHAIN_BIT;
	/* enable interrupt on dma done (probably don't need this, since chain never finishes) */
	bits |= PLX_EN_DMA_DONE_INTR_BIT;
	/* don't increment local address during transfers (we are transferring from a fixed fifo register) */
	bits |= PLX_LOCAL_ADDR_CONST_BIT;
	/* route dma interrupt to pci bus */
	bits |= PLX_DMA_INTR_PCI_BIT;
	/* enable demand mode */
	bits |= PLX_DEMAND_MODE_BIT;
	/* enable local burst mode */
	bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
	/* 4020 uses 32 bit dma */
	if (board(dev)->layout == LAYOUT_4020) {
		bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
	} else {
		/* localspace0 bus is 16 bits wide */
		bits |= PLX_LOCAL_BUS_16_WIDE_BITS;
	}
	/* dma channel 1 is used for analog input; channel 0 only for ao */
	writel(bits, plx_iobase + PLX_DMA1_MODE_REG);
	if (ao_cmd_is_supported(board(dev)))
		writel(bits, plx_iobase + PLX_DMA0_MODE_REG);

	/* enable interrupts on plx 9080 */
	priv(dev)->plx_intcsr_bits |=
	    ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
	    ICS_DMA0_E | ICS_DMA1_E;
	writel(priv(dev)->plx_intcsr_bits,
	       priv(dev)->plx9080_iobase + PLX_INTRCS_REG);
}

/* Allocate and initialize the subdevice structures.
 */
static int setup_subdevices(struct comedi_device *dev)
{
	struct comedi_subdevice *s;
	void __iomem *dio_8255_iobase;
	int i;

	if (alloc_subdevices(dev, 10) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* analog input subdevice */
	dev->read_subdev = s;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DITHER | SDF_CMD_READ;
	if (board(dev)->layout == LAYOUT_60XX)
		s->subdev_flags |= SDF_COMMON | SDF_DIFF;
	else if (board(dev)->layout == LAYOUT_64XX)
		s->subdev_flags |= SDF_DIFF;
	/* XXX Number of inputs in differential mode is ignored */
	s->n_chan = board(dev)->ai_se_chans;
	s->len_chanlist = 0x2000;
	s->maxdata = (1 << board(dev)->ai_bits) - 1;
	s->range_table = board(dev)->ai_range_table;
	s->insn_read = ai_rinsn;
	s->insn_config = ai_config_insn;
	s->do_cmd = ai_cmd;
	s->do_cmdtest = ai_cmdtest;
	s->cancel = ai_cancel;
	if (board(dev)->layout == LAYOUT_4020) {
		uint8_t data;
		/* set adc to read from inputs (not internal calibration sources) */
		priv(dev)->i2c_cal_range_bits = adc_src_4020_bits(4);
		/* set channels to +-5 volt input ranges */
		for (i = 0; i < s->n_chan; i++)
			priv(dev)->i2c_cal_range_bits |= attenuate_bit(i);
		data = priv(dev)->i2c_cal_range_bits;
		i2c_write(dev, RANGE_CAL_I2C_ADDR, &data, sizeof(data));
	}

	/* analog output subdevice */
	s = dev->subdevices + 1;
	if (board(dev)->ao_nchan) {
		s->type = COMEDI_SUBD_AO;
		s->subdev_flags =
		    SDF_READABLE | SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
		s->n_chan = board(dev)->ao_nchan;
		s->maxdata = (1 << board(dev)->ao_bits) - 1;
		s->range_table = board(dev)->ao_range_table;
		s->insn_read = ao_readback_insn;
		s->insn_write = ao_winsn;
		if (ao_cmd_is_supported(board(dev))) {
			dev->write_subdev = s;
			s->do_cmdtest = ao_cmdtest;
			s->do_cmd = ao_cmd;
			s->len_chanlist = board(dev)->ao_nchan;
			s->cancel = ao_cancel;
		}
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	/* digital input */
	s = dev->subdevices + 2;
	if (board(dev)->layout == LAYOUT_64XX) {
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE;
		s->n_chan = 4;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = di_rbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* digital output */
	/* NOTE(review): on non-64XX boards the else branch below re-marks
	 * subdevice 2 (s is not reassigned to subdevices + 3 first);
	 * subdevice 3 is presumably left at its zeroed default — verify. */
	if (board(dev)->layout == LAYOUT_64XX) {
		s = dev->subdevices + 3;
		s->type = COMEDI_SUBD_DO;
		s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
		s->n_chan = 4;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = do_wbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* 8255 */
	s = dev->subdevices + 4;
	if (board(dev)->has_8255) {
		if (board(dev)->layout == LAYOUT_4020) {
			dio_8255_iobase =
			    priv(dev)->main_iobase + I8255_4020_REG;
			subdev_8255_init(dev, s, dio_callback_4020,
					 (unsigned long)dio_8255_iobase);
		} else {
			dio_8255_iobase =
			    priv(dev)->dio_counter_iobase + DIO_8255_OFFSET;
			subdev_8255_init(dev, s, dio_callback,
					 (unsigned long)dio_8255_iobase);
		}
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* 8 channel dio for 60xx */
	s = dev->subdevices + 5;
	if (board(dev)->layout == LAYOUT_60XX) {
		s->type = COMEDI_SUBD_DIO;
		s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
		s->n_chan = 8;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_config = dio_60xx_config_insn;
		s->insn_bits = dio_60xx_wbits;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* caldac */
	s = dev->subdevices + 6;
	s->type = COMEDI_SUBD_CALIB;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
	s->n_chan = 8;
	if (board(dev)->layout == LAYOUT_4020)
		s->maxdata = 0xfff;
	else
		s->maxdata = 0xff;
	s->insn_read = calib_read_insn;
	s->insn_write = calib_write_insn;
	/* start the caldacs at mid-scale */
	for (i = 0; i < s->n_chan; i++)
		caldac_write(dev, i, s->maxdata / 2);

	/* 2 channel ad8402 potentiometer */
	s = dev->subdevices + 7;
	if (board(dev)->layout == LAYOUT_64XX) {
		s->type = COMEDI_SUBD_CALIB;
		s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
		s->n_chan = 2;
		s->insn_read = ad8402_read_insn;
		s->insn_write = ad8402_write_insn;
		s->maxdata = 0xff;
		for (i = 0; i < s->n_chan; i++)
			ad8402_write(dev, i, s->maxdata / 2);
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* serial EEPROM, if present */
	s = dev->subdevices + 8;
	if (readl(priv(dev)->plx9080_iobase + PLX_CONTROL_REG) & CTL_EECHK) {
		s->type = COMEDI_SUBD_MEMORY;
		s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
		s->n_chan = 128;
		s->maxdata = 0xffff;
		s->insn_read = eeprom_read_insn;
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* user counter subd XXX */
	s = dev->subdevices + 9;
	s->type = COMEDI_SUBD_UNUSED;

	return 0;
}

/* Clear and write out the plx9080 interrupt enable register. */
static void disable_plx_interrupts(struct comedi_device *dev)
{
	priv(dev)->plx_intcsr_bits = 0;
	writel(priv(dev)->plx_intcsr_bits,
	       priv(dev)->plx9080_iobase + PLX_INTRCS_REG);
}

/* One-time initialization of the board's STC control registers. */
static void init_stc_registers(struct comedi_device *dev)
{
	uint16_t bits;
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);

	/* bit should be set for 6025, although docs say boards with <= 16 chans should be cleared XXX */
	if (1)
		priv(dev)->adc_control1_bits |= ADC_QUEUE_CONFIG_BIT;
	writew(priv(dev)->adc_control1_bits,
	       priv(dev)->main_iobase + ADC_CONTROL1_REG);

	/* 6402/16 manual says this register must be initialized to 0xff? */
	writew(0xff, priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);

	bits = SLOW_DAC_BIT | DMA_CH_SELECT_BIT;
	if (board(dev)->layout == LAYOUT_4020)
		bits |= INTERNAL_CLOCK_4020_BITS;
	priv(dev)->hw_config_bits |= bits;
	writew(priv(dev)->hw_config_bits,
	       priv(dev)->main_iobase + HW_CONFIG_REG);

	writew(0, priv(dev)->main_iobase + DAQ_SYNC_REG);
	writew(0, priv(dev)->main_iobase + CALIBRATION_REG);

	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* set fifos to maximum size */
	priv(dev)->fifo_size_bits |= DAC_FIFO_BITS;
	set_ai_fifo_segment_length(dev,
				   board(dev)->ai_fifo->max_segment_length);

	priv(dev)->dac_control1_bits = DAC_OUTPUT_ENABLE_BIT;
	priv(dev)->intr_enable_bits =	/* EN_DAC_INTR_SRC_BIT | DAC_INTR_QEMPTY_BITS | */
	    EN_DAC_DONE_INTR_BIT | EN_DAC_UNDERRUN_BIT;
	writew(priv(dev)->intr_enable_bits,
	       priv(dev)->main_iobase + INTR_ENABLE_REG);

	disable_ai_pacing(dev);
};	/* NOTE(review): stray ';' after function body — harmless */

/*
 * Allocate coherent DMA buffers and descriptor rings for analog input
 * (and, where supported, analog output), then chain the descriptors
 * into circular lists for the plx9080 DMA engine.
 */
static int alloc_and_init_dma_members(struct comedi_device *dev)
{
	int i;

	/* allocate pci dma buffers */
	for (i = 0; i <
	     ai_dma_ring_count(board(dev)); i++) {
		priv(dev)->ai_buffer[i] =
		    pci_alloc_consistent(priv(dev)->hw_dev, DMA_BUFFER_SIZE,
					 &priv(dev)->ai_buffer_bus_addr[i]);
		if (priv(dev)->ai_buffer[i] == NULL)
			return -ENOMEM;
	}
	for (i = 0; i < AO_DMA_RING_COUNT; i++) {
		if (ao_cmd_is_supported(board(dev))) {
			priv(dev)->ao_buffer[i] =
			    pci_alloc_consistent(priv(dev)->hw_dev,
						 DMA_BUFFER_SIZE,
						 &priv(dev)->
						 ao_buffer_bus_addr[i]);
			if (priv(dev)->ao_buffer[i] == NULL)
				return -ENOMEM;
		}
	}
	/* allocate dma descriptors */
	priv(dev)->ai_dma_desc =
	    pci_alloc_consistent(priv(dev)->hw_dev,
				 sizeof(struct plx_dma_desc) *
				 ai_dma_ring_count(board(dev)),
				 &priv(dev)->ai_dma_desc_bus_addr);
	if (priv(dev)->ai_dma_desc == NULL)
		return -ENOMEM;
	DEBUG_PRINT("ai dma descriptors start at bus addr 0x%x\n",
		    priv(dev)->ai_dma_desc_bus_addr);
	if (ao_cmd_is_supported(board(dev))) {
		priv(dev)->ao_dma_desc =
		    pci_alloc_consistent(priv(dev)->hw_dev,
					 sizeof(struct plx_dma_desc) *
					 AO_DMA_RING_COUNT,
					 &priv(dev)->ao_dma_desc_bus_addr);
		if (priv(dev)->ao_dma_desc == NULL)
			return -ENOMEM;
		DEBUG_PRINT("ao dma descriptors start at bus addr 0x%x\n",
			    priv(dev)->ao_dma_desc_bus_addr);
	}
	/* initialize dma descriptors: each entry's 'next' points at the
	 * following entry (wrapping around), forming a circular chain */
	for (i = 0; i < ai_dma_ring_count(board(dev)); i++) {
		priv(dev)->ai_dma_desc[i].pci_start_addr =
		    cpu_to_le32(priv(dev)->ai_buffer_bus_addr[i]);
		if (board(dev)->layout == LAYOUT_4020)
			priv(dev)->ai_dma_desc[i].local_start_addr =
			    cpu_to_le32(priv(dev)->local1_iobase +
					ADC_FIFO_REG);
		else
			priv(dev)->ai_dma_desc[i].local_start_addr =
			    cpu_to_le32(priv(dev)->local0_iobase +
					ADC_FIFO_REG);
		priv(dev)->ai_dma_desc[i].transfer_size = cpu_to_le32(0);
		priv(dev)->ai_dma_desc[i].next =
		    cpu_to_le32((priv(dev)->ai_dma_desc_bus_addr +
				 ((i + 1) % ai_dma_ring_count(board(dev))) *
				 sizeof(priv(dev)->ai_dma_desc[0])) |
				PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
				PLX_XFER_LOCAL_TO_PCI);
	}
	if (ao_cmd_is_supported(board(dev))) {
		for (i = 0; i < AO_DMA_RING_COUNT; i++) {
			priv(dev)->ao_dma_desc[i].pci_start_addr =
			    cpu_to_le32(priv(dev)->ao_buffer_bus_addr[i]);
			priv(dev)->ao_dma_desc[i].local_start_addr =
			    cpu_to_le32(priv(dev)->local0_iobase +
					DAC_FIFO_REG);
			priv(dev)->ao_dma_desc[i].transfer_size =
			    cpu_to_le32(0);
			priv(dev)->ao_dma_desc[i].next =
			    cpu_to_le32((priv(dev)->ao_dma_desc_bus_addr +
					 ((i + 1) % (AO_DMA_RING_COUNT)) *
					 sizeof(priv(dev)->ao_dma_desc[0])) |
					PLX_DESC_IN_PCI_BIT |
					PLX_INTR_TERM_COUNT);
		}
	}
	return 0;
}

/* Warn that AO commands and the AI external channel queue are exclusive. */
static inline void warn_external_queue(struct comedi_device *dev)
{
	comedi_error(dev,
		     "AO command and AI external channel queue cannot be used simultaneously.");
	comedi_error(dev,
		     "Use internal AI channel queue (channels must be consecutive and use same range/aref)");
}

/*
 * Attach is called by the Comedi core to configure the driver
 * for a particular board.
 */
static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct pci_dev *pcidev = NULL;
	int index;
	uint32_t local_range, local_decode;
	int retval;

	/*
	 * Allocate the private structure area.
	 */
	if (alloc_private(dev, sizeof(struct pcidas64_private)) < 0)
		return -ENOMEM;

	/*
	 * Probe the device to determine what device in the series it is.
	 */
	for_each_pci_dev(pcidev) {
		/* is it not a computer boards card? */
		if (pcidev->vendor != PCI_VENDOR_ID_COMPUTERBOARDS)
			continue;
		/* loop through cards supported by this driver */
		for (index = 0; index < ARRAY_SIZE(pcidas64_boards); index++) {
			if (pcidas64_boards[index].device_id != pcidev->device)
				continue;
			/* was a particular bus/slot requested? */
			if (it->options[0] || it->options[1]) {
				/* are we on the wrong bus/slot? */
				if (pcidev->bus->number != it->options[0] ||
				    PCI_SLOT(pcidev->devfn) != it->options[1]) {
					continue;
				}
			}
			priv(dev)->hw_dev = pcidev;
			dev->board_ptr = pcidas64_boards + index;
			break;
		}
		if (dev->board_ptr)
			break;
	}

	if (dev->board_ptr == NULL) {
		printk
		    ("No supported ComputerBoards/MeasurementComputing card found\n");
		return -EIO;
	}

	dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
		board(dev)->name, pcidev->bus->number, PCI_SLOT(pcidev->devfn));

	if (comedi_pci_enable(pcidev, driver_cb_pcidas.driver_name)) {
		dev_warn(dev->hw_dev,
			 "failed to enable PCI device and request regions\n");
		return -EIO;
	}
	pci_set_master(pcidev);

	/* Initialize dev->board_name */
	dev->board_name = board(dev)->name;

	priv(dev)->plx9080_phys_iobase =
	    pci_resource_start(pcidev, PLX9080_BADDRINDEX);
	priv(dev)->main_phys_iobase =
	    pci_resource_start(pcidev, MAIN_BADDRINDEX);
	priv(dev)->dio_counter_phys_iobase =
	    pci_resource_start(pcidev, DIO_COUNTER_BADDRINDEX);

	/* remap, won't work with 2.0 kernels but who cares */
	priv(dev)->plx9080_iobase =
	    ioremap(priv(dev)->plx9080_phys_iobase,
		    pci_resource_len(pcidev, PLX9080_BADDRINDEX));
	priv(dev)->main_iobase =
	    ioremap(priv(dev)->main_phys_iobase,
		    pci_resource_len(pcidev, MAIN_BADDRINDEX));
	priv(dev)->dio_counter_iobase =
	    ioremap(priv(dev)->dio_counter_phys_iobase,
		    pci_resource_len(pcidev, DIO_COUNTER_BADDRINDEX));

	if (!priv(dev)->plx9080_iobase || !priv(dev)->main_iobase
	    || !priv(dev)->dio_counter_iobase) {
		dev_warn(dev->hw_dev, "failed to remap io memory\n");
		return -ENOMEM;
	}

	DEBUG_PRINT(" plx9080 remapped to 0x%p\n", priv(dev)->plx9080_iobase);
	DEBUG_PRINT(" main remapped to 0x%p\n", priv(dev)->main_iobase);
	DEBUG_PRINT(" diocounter remapped to 0x%p\n",
		    priv(dev)->dio_counter_iobase);

	/* figure out what local addresses are */
	local_range =
	    readl(priv(dev)->plx9080_iobase + PLX_LAS0RNG_REG) & LRNG_MEM_MASK;
	local_decode =
	    readl(priv(dev)->plx9080_iobase + PLX_LAS0MAP_REG) & local_range &
	    LMAP_MEM_MASK;
	priv(dev)->local0_iobase =
	    ((uint32_t) priv(dev)->main_phys_iobase & ~local_range) |
	    local_decode;
	local_range =
	    readl(priv(dev)->plx9080_iobase + PLX_LAS1RNG_REG) & LRNG_MEM_MASK;
	local_decode =
	    readl(priv(dev)->plx9080_iobase + PLX_LAS1MAP_REG) & local_range &
	    LMAP_MEM_MASK;
	priv(dev)->local1_iobase =
	    ((uint32_t) priv(dev)->dio_counter_phys_iobase & ~local_range) |
	    local_decode;

	DEBUG_PRINT(" local 0 io addr 0x%x\n", priv(dev)->local0_iobase);
	DEBUG_PRINT(" local 1 io addr 0x%x\n", priv(dev)->local1_iobase);

	retval = alloc_and_init_dma_members(dev);
	if (retval < 0)
		return retval;

	priv(dev)->hw_revision =
	    hw_revision(dev, readw(priv(dev)->main_iobase + HW_STATUS_REG));
	dev_dbg(dev->hw_dev, "stc hardware revision %i\n",
		priv(dev)->hw_revision);
	init_plx9080(dev);
	init_stc_registers(dev);
	/* get irq */
	if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
			"cb_pcidas64", dev)) {
		dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
			pcidev->irq);
		return -EINVAL;
	}
	dev->irq = pcidev->irq;
	dev_dbg(dev->hw_dev, "irq %u\n", dev->irq);

	retval = setup_subdevices(dev);
	if (retval < 0)
		return retval;

	return 0;
}

/*
 * _detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
 */
static int detach(struct comedi_device *dev)
{
	unsigned int i;

	if (dev->irq)
		free_irq(dev->irq, dev);
	if (priv(dev)) {
		if (priv(dev)->hw_dev) {
			if (priv(dev)->plx9080_iobase) {
				disable_plx_interrupts(dev);
				iounmap(priv(dev)->plx9080_iobase);
			}
			if (priv(dev)->main_iobase)
				iounmap(priv(dev)->main_iobase);
			if (priv(dev)->dio_counter_iobase)
				iounmap(priv(dev)->dio_counter_iobase);
			/* free pci dma buffers */
			for (i = 0; i < ai_dma_ring_count(board(dev)); i++) {
				if (priv(dev)->ai_buffer[i])
					pci_free_consistent(priv(dev)->hw_dev,
							    DMA_BUFFER_SIZE,
							    priv(dev)->
							    ai_buffer[i],
							    priv(dev)->
							    ai_buffer_bus_addr
							    [i]);
			}
			for (i = 0; i < AO_DMA_RING_COUNT; i++) {
				if (priv(dev)->ao_buffer[i])
					pci_free_consistent(priv(dev)->hw_dev,
							    DMA_BUFFER_SIZE,
							    priv(dev)->
							    ao_buffer[i],
							    priv(dev)->
							    ao_buffer_bus_addr
							    [i]);
			}
			/* free dma descriptors */
			if (priv(dev)->ai_dma_desc)
				pci_free_consistent(priv(dev)->hw_dev,
						    sizeof(struct plx_dma_desc)
						    *
						    ai_dma_ring_count(board
								      (dev)),
						    priv(dev)->ai_dma_desc,
						    priv(dev)->
						    ai_dma_desc_bus_addr);
			if (priv(dev)->ao_dma_desc)
				pci_free_consistent(priv(dev)->hw_dev,
						    sizeof(struct plx_dma_desc)
						    * AO_DMA_RING_COUNT,
						    priv(dev)->ao_dma_desc,
						    priv(dev)->
						    ao_dma_desc_bus_addr);
			/* main_phys_iobase doubles as "pci regions enabled" flag */
			if (priv(dev)->main_phys_iobase)
				comedi_pci_disable(priv(dev)->hw_dev);

			pci_dev_put(priv(dev)->hw_dev);
		}
	}
	/* NOTE(review): subdevice 4 is cleaned up unconditionally even when
	 * subdev_8255_init() was never called for it (no has_8255) — confirm
	 * subdev_8255_cleanup tolerates that. */
	if (dev->subdevices)
		subdev_8255_cleanup(dev, dev->subdevices + 4);

	return 0;
}

/*
 * Analog input read instruction: performs insn->n single software-triggered
 * conversions on the requested channel/range/aref and stores the samples
 * in data[].  Returns the number of samples read, or -ETIME on timeout.
 */
static int ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
		    struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bits = 0, n, i;
	unsigned int channel, range, aref;
	unsigned long flags;
	static const int timeout = 100;	/* polls (1 us apart) to wait for a sample */

	DEBUG_PRINT("chanspec 0x%x\n", insn->chanspec);
	channel = CR_CHAN(insn->chanspec);
	range = CR_RANGE(insn->chanspec);
	aref = CR_AREF(insn->chanspec);

	/* disable card's analog input interrupt sources and pacing */
	/* 4020 generates dac done interrupts even though they are disabled */
	disable_ai_pacing(dev);

	spin_lock_irqsave(&dev->spinlock, flags);
	if (insn->chanspec & CR_ALT_FILTER)
		priv(dev)->adc_control1_bits |= ADC_DITHER_BIT;
	else
		priv(dev)->adc_control1_bits &= ~ADC_DITHER_BIT;
	writew(priv(dev)->adc_control1_bits,
	       priv(dev)->main_iobase + ADC_CONTROL1_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	if (board(dev)->layout != LAYOUT_4020) {
		/* use internal queue */
		priv(dev)->hw_config_bits &= ~EXT_QUEUE_BIT;
		writew(priv(dev)->hw_config_bits,
		       priv(dev)->main_iobase + HW_CONFIG_REG);

		/* ALT_SOURCE is internal calibration reference */
		if (insn->chanspec & CR_ALT_SOURCE) {
			unsigned int cal_en_bit;

			DEBUG_PRINT("reading calibration source\n");
			if (board(dev)->layout == LAYOUT_60XX)
				cal_en_bit = CAL_EN_60XX_BIT;
			else
				cal_en_bit = CAL_EN_64XX_BIT;
			/* select internal reference source to connect to channel 0 */
			writew(cal_en_bit |
			       adc_src_bits(priv(dev)->calibration_source),
			       priv(dev)->main_iobase + CALIBRATION_REG);
		} else {
			/* make sure internal calibration source is turned off */
			writew(0, priv(dev)->main_iobase + CALIBRATION_REG);
		}
		/* load internal queue */
		bits = 0;
		/* set gain */
		bits |= ai_range_bits_6xxx(dev, CR_RANGE(insn->chanspec));
		/* set single-ended / differential */
		bits |= se_diff_bit_6xxx(dev, aref == AREF_DIFF);
		if (aref == AREF_COMMON)
			bits |= ADC_COMMON_BIT;
		bits |= adc_chan_bits(channel);
		/* set stop channel */
		writew(adc_chan_bits(channel),
		       priv(dev)->main_iobase + ADC_QUEUE_HIGH_REG);
		/* set start channel, and rest of settings */
		writew(bits, priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG);
	} else {
		uint8_t old_cal_range_bits = priv(dev)->i2c_cal_range_bits;

		priv(dev)->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK;
		if (insn->chanspec & CR_ALT_SOURCE) {
			DEBUG_PRINT("reading calibration source\n");
			priv(dev)->i2c_cal_range_bits |=
			    adc_src_4020_bits(priv(dev)->calibration_source);
		} else {	/* select BNC inputs */
			priv(dev)->i2c_cal_range_bits |= adc_src_4020_bits(4);
		}
		/* select range */
		if (range == 0)
			priv(dev)->i2c_cal_range_bits |= attenuate_bit(channel);
		else
			priv(dev)->i2c_cal_range_bits &=
			    ~attenuate_bit(channel);
		/* update calibration/range i2c register only if necessary, as it is very slow */
		if (old_cal_range_bits != priv(dev)->i2c_cal_range_bits) {
			uint8_t i2c_data = priv(dev)->i2c_cal_range_bits;

			i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data,
				  sizeof(i2c_data));
		}

		/* 4020 manual asks that sample interval register to be set before writing to convert register.
		 * Using somewhat arbitrary setting of 4 master clock ticks = 0.1 usec */
		writew(0,
		       priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG);
		writew(2,
		       priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
	}

	for (n = 0; n < insn->n; n++) {
		/* clear adc buffer (inside loop for 4020 sake) */
		writew(0, priv(dev)->main_iobase + ADC_BUFFER_CLEAR_REG);

		/* trigger conversion, bits sent only matter for 4020 */
		writew(adc_convert_chan_4020_bits(CR_CHAN(insn->chanspec)),
		       priv(dev)->main_iobase + ADC_CONVERT_REG);

		/* wait for data */
		for (i = 0; i < timeout; i++) {
			bits = readw(priv(dev)->main_iobase + HW_STATUS_REG);
			DEBUG_PRINT(" pipe bits 0x%x\n", pipe_full_bits(bits));
			if (board(dev)->layout == LAYOUT_4020) {
				if (readw(priv(dev)->main_iobase +
					  ADC_WRITE_PNTR_REG))
					break;
			} else {
				if (pipe_full_bits(bits))
					break;
			}
			udelay(1);
		}
		DEBUG_PRINT(" looped %i times waiting for data\n", i);
		if (i == timeout) {
			comedi_error(dev, " analog input read insn timed out");
			printk(" status 0x%x\n", bits);
			return -ETIME;
		}
		if (board(dev)->layout == LAYOUT_4020)
			data[n] = readl(priv(dev)->dio_counter_iobase +
					ADC_FIFO_REG) & 0xffff;
		else
			data[n] = readw(priv(dev)->main_iobase +
					PIPE1_READ_REG);
	}

	return n;
}

/* INSN_CONFIG_ALT_SOURCE: select which internal calibration source to read. */
static int ai_config_calibration_source(struct comedi_device *dev,
					unsigned int *data)
{
	unsigned int source = data[1];
	int num_calibration_sources;

	if (board(dev)->layout == LAYOUT_60XX)
		num_calibration_sources = 16;
	else
		num_calibration_sources = 8;
	if (source >= num_calibration_sources) {
		dev_dbg(dev->hw_dev, "invalid calibration source: %i\n",
			source);
		return -EINVAL;
	}

	DEBUG_PRINT("setting calibration source to %i\n", source);
	priv(dev)->calibration_source = source;

	return 2;
}

/* INSN_CONFIG_BLOCK_SIZE: resize the ai fifo; reports actual block size back. */
static int ai_config_block_size(struct comedi_device *dev, unsigned int *data)
{
	int fifo_size;
	const struct hw_fifo_info *const fifo = board(dev)->ai_fifo;
	unsigned int block_size, requested_block_size;
	int retval;

	requested_block_size = data[1];

	if (requested_block_size) {
		fifo_size =
		    requested_block_size * fifo->num_segments / bytes_in_sample;

		retval = set_ai_fifo_size(dev, fifo_size);
		if (retval < 0)
			return retval;
	}

	block_size = ai_fifo_size(dev) / fifo->num_segments * bytes_in_sample;

	data[1] = block_size;

	return 2;
}

/* INSN_CONFIG_TIMER_1 on 4020: set divisor/chanspec for the external clock. */
static int ai_config_master_clock_4020(struct comedi_device *dev,
				       unsigned int *data)
{
	unsigned int divisor = data[4];
	int retval = 0;

	if (divisor < 2) {
		divisor = 2;
		retval = -EAGAIN;
	}

	switch (data[1]) {
	case COMEDI_EV_SCAN_BEGIN:
		priv(dev)->ext_clock.divisor = divisor;
		priv(dev)->ext_clock.chanspec = data[2];
		break;
	default:
		return -EINVAL;
		break;
	}

	data[4] = divisor;

	return retval ? retval : 5;
}

/* XXX could add support for 60xx series */
static int ai_config_master_clock(struct comedi_device *dev, unsigned int *data)
{

	switch (board(dev)->layout) {
	case LAYOUT_4020:
		return ai_config_master_clock_4020(dev, data);
		break;
	default:
		return -EINVAL;
		break;
	}

	return -EINVAL;
}

/* Dispatch INSN_CONFIG sub-instructions for the analog input subdevice. */
static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	int id = data[0];

	switch (id) {
	case INSN_CONFIG_ALT_SOURCE:
		return ai_config_calibration_source(dev, data);
		break;
	case INSN_CONFIG_BLOCK_SIZE:
		return ai_config_block_size(dev, data);
		break;
	case INSN_CONFIG_TIMER_1:
		return ai_config_master_clock(dev, data);
		break;
	default:
		return -EINVAL;
		break;
	}
	return -EINVAL;
}

/*
 * Validate (and fix up) an asynchronous analog input command.  Follows
 * the standard comedi cmdtest protocol: returns 0 when the command is
 * acceptable, or the step number (1-5) at which it was modified/rejected.
 */
static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
		      struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	unsigned int tmp_arg, tmp_arg2;
	int i;
	int aref;
	unsigned int triggers;

	/* step 1: make sure trigger sources are trivially valid */

	tmp =
cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	triggers = TRIG_TIMER;
	if (board(dev)->layout == LAYOUT_4020)
		triggers |= TRIG_OTHER;
	else
		triggers |= TRIG_FOLLOW;
	cmd->scan_begin_src &= triggers;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	triggers = TRIG_TIMER;
	if (board(dev)->layout == LAYOUT_4020)
		triggers |= TRIG_NOW;
	else
		triggers |= TRIG_EXT;
	cmd->convert_src &= triggers;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_EXT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/* step 2: make sure trigger sources are unique and mutually compatible */

	/* uniqueness check */
	if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT)
		err++;
	if (cmd->scan_begin_src != TRIG_TIMER &&
	    cmd->scan_begin_src != TRIG_OTHER &&
	    cmd->scan_begin_src != TRIG_FOLLOW)
		err++;
	if (cmd->convert_src != TRIG_TIMER &&
	    cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW)
		err++;
	if (cmd->stop_src != TRIG_COUNT &&
	    cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT)
		err++;

	/* compatibility check */
	if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER)
		err++;
	/* NOTE(review): the stop_src test below duplicates the uniqueness
	 * check above — looks like copy/paste; harmless but redundant. */
	if (cmd->stop_src != TRIG_COUNT &&
	    cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	if (cmd->convert_src == TRIG_TIMER) {
		if (board(dev)->layout == LAYOUT_4020) {
			/* 4020 converts continuously; convert_arg must be 0 */
			if (cmd->convert_arg) {
				cmd->convert_arg = 0;
				err++;
			}
		} else {
			if (cmd->convert_arg < board(dev)->ai_speed) {
				cmd->convert_arg = board(dev)->ai_speed;
				err++;
			}
			if (cmd->scan_begin_src == TRIG_TIMER) {
				/* if scans are timed faster than conversion rate allows */
				if (cmd->convert_arg * cmd->chanlist_len >
				    cmd->scan_begin_arg) {
					cmd->scan_begin_arg =
					    cmd->convert_arg *
					    cmd->chanlist_len;
					err++;
				}
			}
		}
	}

	if (!cmd->chanlist_len) {
		cmd->chanlist_len = 1;
		err++;
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}

	switch (cmd->stop_src) {
	case TRIG_EXT:
		break;
	case TRIG_COUNT:
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
		break;
	case TRIG_NONE:
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
		break;
	default:
		break;
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->convert_src == TRIG_TIMER) {
		tmp_arg = cmd->convert_arg;
		tmp_arg2 = cmd->scan_begin_arg;
		check_adc_timing(dev, cmd);
		if (tmp_arg != cmd->convert_arg)
			err++;
		if (tmp_arg2 != cmd->scan_begin_arg)
			err++;
	}

	if (err)
		return 4;

	/* make sure user doesn't change analog reference mid chanlist */
	if (cmd->chanlist) {
		aref = CR_AREF(cmd->chanlist[0]);
		for (i = 1; i < cmd->chanlist_len; i++) {
			if (aref != CR_AREF(cmd->chanlist[i])) {
				comedi_error(dev,
					     "all elements in chanlist must use the same analog reference");
				err++;
				break;
			}
		}
		/* check 4020 chanlist */
		if (board(dev)->layout == LAYOUT_4020) {
			unsigned int first_channel = CR_CHAN(cmd->chanlist[0]);

			for (i = 1; i < cmd->chanlist_len; i++) {
				if (CR_CHAN(cmd->chanlist[i]) !=
				    first_channel + i) {
					comedi_error(dev,
						     "chanlist must use consecutive channels");
					err++;
					break;
				}
			}
			if (cmd->chanlist_len == 3) {
				comedi_error(dev,
					     "chanlist cannot be 3 channels long, use 1, 2, or 4 channels");
				err++;
			}
		}
	}

	if (err)
		return 5;

	return 0;
}

/* Would the hardware sample counter handle cmd's stop condition? */
static int use_hw_sample_counter(struct comedi_cmd *cmd)
{
/* disable for now until I work out a race */
	return 0;	/* NOTE: intentionally disables the check below */

	if (cmd->stop_src == TRIG_COUNT && cmd->stop_arg <= max_counter_value)
		return 1;
	else
		return 0;
}

/* Program software and/or hardware sample counters for an ai command. */
static void setup_sample_counters(struct comedi_device *dev,
				 struct comedi_cmd *cmd)
{
	if (cmd->stop_src == TRIG_COUNT) {
		/* set software count */
		priv(dev)->ai_count = cmd->stop_arg * cmd->chanlist_len;
	}
	/* load hardware conversion counter */
	if
	    (use_hw_sample_counter(cmd)) {
		writew(cmd->stop_arg & 0xffff,
		       priv(dev)->main_iobase + ADC_COUNT_LOWER_REG);
		writew((cmd->stop_arg >> 16) & 0xff,
		       priv(dev)->main_iobase + ADC_COUNT_UPPER_REG);
	} else {
		writew(1, priv(dev)->main_iobase + ADC_COUNT_LOWER_REG);
	}
}

/* Number of samples the dma engine should move per buffer, capped to fit. */
static inline unsigned int dma_transfer_size(struct comedi_device *dev)
{
	unsigned int num_samples;

	num_samples =
	    priv(dev)->ai_fifo_segment_length *
	    board(dev)->ai_fifo->sample_packing_ratio;
	if (num_samples > DMA_BUFFER_SIZE / sizeof(uint16_t))
		num_samples = DMA_BUFFER_SIZE / sizeof(uint16_t);

	return num_samples;
}

/* Stop analog input pacing: interrupts off, software gate cleared, dma off. */
static void disable_ai_pacing(struct comedi_device *dev)
{
	unsigned long flags;

	disable_ai_interrupts(dev);

	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->adc_control1_bits &= ~ADC_SW_GATE_BIT;
	writew(priv(dev)->adc_control1_bits,
	       priv(dev)->main_iobase + ADC_CONTROL1_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* disable pacing, triggering, etc */
	writew(ADC_DMA_DISABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT,
	       priv(dev)->main_iobase + ADC_CONTROL0_REG);
}

/* Mask off every analog input interrupt source under the spinlock. */
static void disable_ai_interrupts(struct comedi_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->intr_enable_bits &=
	    ~EN_ADC_INTR_SRC_BIT & ~EN_ADC_DONE_INTR_BIT &
	    ~EN_ADC_ACTIVE_INTR_BIT & ~EN_ADC_STOP_INTR_BIT &
	    ~EN_ADC_OVERRUN_BIT & ~ADC_INTR_SRC_MASK;
	writew(priv(dev)->intr_enable_bits,
	       priv(dev)->main_iobase + INTR_ENABLE_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	DEBUG_PRINT("intr enable bits 0x%x\n", priv(dev)->intr_enable_bits);
}

/* Enable the analog input interrupt sources appropriate for cmd. */
static void enable_ai_interrupts(struct comedi_device *dev,
				 const struct comedi_cmd *cmd)
{
	uint32_t bits;
	unsigned long flags;

	bits = EN_ADC_OVERRUN_BIT | EN_ADC_DONE_INTR_BIT |
	       EN_ADC_ACTIVE_INTR_BIT | EN_ADC_STOP_INTR_BIT;
	/* Use pio transfer and interrupt on end of conversion if TRIG_WAKE_EOS flag is set. */
	if (cmd->flags & TRIG_WAKE_EOS) {
		/* 4020 doesn't support pio transfers except for fifo dregs */
		if (board(dev)->layout != LAYOUT_4020)
			bits |= ADC_INTR_EOSCAN_BITS | EN_ADC_INTR_SRC_BIT;
	}
	spin_lock_irqsave(&dev->spinlock, flags);
	priv(dev)->intr_enable_bits |= bits;
	writew(priv(dev)->intr_enable_bits,
	       priv(dev)->main_iobase + INTR_ENABLE_REG);
	DEBUG_PRINT("intr enable bits 0x%x\n", priv(dev)->intr_enable_bits);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

static uint32_t ai_convert_counter_6xxx(const struct comedi_device *dev,
					const struct comedi_cmd *cmd)
{
	/* supposed to load counter with desired divisor minus 3 */
	return cmd->convert_arg / TIMER_BASE - 3;
}

/* Compute the end-of-scan delay counter value for 6xxx boards. */
static uint32_t ai_scan_counter_6xxx(struct comedi_device *dev,
				     struct comedi_cmd *cmd)
{
	uint32_t count;

	/* figure out how long we need to delay at end of scan */
	switch (cmd->scan_begin_src) {
	case TRIG_TIMER:
		count = (cmd->scan_begin_arg -
			 (cmd->convert_arg * (cmd->chanlist_len - 1))) /
		    TIMER_BASE;
		break;
	case TRIG_FOLLOW:
		count = cmd->convert_arg / TIMER_BASE;
		break;
	default:
		return 0;
		break;
	}
	return count - 3;
}

/* Compute the 4020's convert-interval counter from cmd's scan timing. */
static uint32_t ai_convert_counter_4020(struct comedi_device *dev,
					struct comedi_cmd *cmd)
{
	unsigned int divisor;

	switch (cmd->scan_begin_src) {
	case TRIG_TIMER:
		divisor = cmd->scan_begin_arg / TIMER_BASE;
		break;
	case TRIG_OTHER:
		divisor = priv(dev)->ext_clock.divisor;
		break;
	default:		/* should never happen */
		comedi_error(dev, "bug! failed to set ai pacing!");
		divisor = 1000;
		break;
	}

	/* supposed to load counter with desired divisor minus 2 for 4020 */
	return divisor - 2;
}

/* Select internal, BNC, or external master clock on the 4020. */
static void select_master_clock_4020(struct comedi_device *dev,
				     const struct comedi_cmd *cmd)
{
	/* select internal/external master clock */
	priv(dev)->hw_config_bits &= ~MASTER_CLOCK_4020_MASK;
	if (cmd->scan_begin_src == TRIG_OTHER) {
		int chanspec = priv(dev)->ext_clock.chanspec;

		if (CR_CHAN(chanspec))
			priv(dev)->hw_config_bits |= BNC_CLOCK_4020_BITS;
		else
			priv(dev)->hw_config_bits |= EXT_CLOCK_4020_BITS;
	} else {
		priv(dev)->hw_config_bits |= INTERNAL_CLOCK_4020_BITS;
	}
	writew(priv(dev)->hw_config_bits,
	       priv(dev)->main_iobase + HW_CONFIG_REG);
}

/* Board-layout dispatch for master clock selection (only 4020 needs it). */
static void select_master_clock(struct comedi_device *dev,
				const struct comedi_cmd *cmd)
{
	switch (board(dev)->layout) {
	case LAYOUT_4020:
		select_master_clock_4020(dev, cmd);
		break;
	default:
		break;
	}
}

/* Kick off a plx9080 dma channel (0 or 1), serialized on dev->spinlock. */
static inline void dma_start_sync(struct comedi_device *dev,
				  unsigned int channel)
{
	unsigned long flags;

	/* spinlock for plx dma control/status reg */
	spin_lock_irqsave(&dev->spinlock, flags);
	if (channel)
		writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
		       PLX_CLEAR_DMA_INTR_BIT,
		       priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG);
	else
		writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
		       PLX_CLEAR_DMA_INTR_BIT,
		       priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
	spin_unlock_irqrestore(&dev->spinlock, flags);
}

/* Program the conversion/scan interval counters for an ai command. */
static void set_ai_pacing(struct comedi_device *dev, struct comedi_cmd *cmd)
{
	uint32_t convert_counter = 0, scan_counter = 0;

	check_adc_timing(dev, cmd);

	select_master_clock(dev, cmd);

	if (board(dev)->layout == LAYOUT_4020) {
		convert_counter = ai_convert_counter_4020(dev, cmd);
	} else {
		convert_counter = ai_convert_counter_6xxx(dev, cmd);
		scan_counter = ai_scan_counter_6xxx(dev, cmd);
	}

	/* load lower 16 bits of convert interval */
	writew(convert_counter & 0xffff,
	       priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_LOWER_REG);
	DEBUG_PRINT("convert counter 0x%x\n", convert_counter);
	/* load upper 8 bits of
convert interval */ writew((convert_counter >> 16) & 0xff, priv(dev)->main_iobase + ADC_SAMPLE_INTERVAL_UPPER_REG); /* load lower 16 bits of scan delay */ writew(scan_counter & 0xffff, priv(dev)->main_iobase + ADC_DELAY_INTERVAL_LOWER_REG); /* load upper 8 bits of scan delay */ writew((scan_counter >> 16) & 0xff, priv(dev)->main_iobase + ADC_DELAY_INTERVAL_UPPER_REG); DEBUG_PRINT("scan counter 0x%x\n", scan_counter); } static int use_internal_queue_6xxx(const struct comedi_cmd *cmd) { int i; for (i = 0; i + 1 < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i + 1]) != CR_CHAN(cmd->chanlist[i]) + 1) return 0; if (CR_RANGE(cmd->chanlist[i + 1]) != CR_RANGE(cmd->chanlist[i])) return 0; if (CR_AREF(cmd->chanlist[i + 1]) != CR_AREF(cmd->chanlist[i])) return 0; } return 1; } static int setup_channel_queue(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned short bits; int i; if (board(dev)->layout != LAYOUT_4020) { if (use_internal_queue_6xxx(cmd)) { priv(dev)->hw_config_bits &= ~EXT_QUEUE_BIT; writew(priv(dev)->hw_config_bits, priv(dev)->main_iobase + HW_CONFIG_REG); bits = 0; /* set channel */ bits |= adc_chan_bits(CR_CHAN(cmd->chanlist[0])); /* set gain */ bits |= ai_range_bits_6xxx(dev, CR_RANGE(cmd->chanlist[0])); /* set single-ended / differential */ bits |= se_diff_bit_6xxx(dev, CR_AREF(cmd->chanlist[0]) == AREF_DIFF); if (CR_AREF(cmd->chanlist[0]) == AREF_COMMON) bits |= ADC_COMMON_BIT; /* set stop channel */ writew(adc_chan_bits (CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])), priv(dev)->main_iobase + ADC_QUEUE_HIGH_REG); /* set start channel, and rest of settings */ writew(bits, priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG); } else { /* use external queue */ if (dev->write_subdev && dev->write_subdev->busy) { warn_external_queue(dev); return -EBUSY; } priv(dev)->hw_config_bits |= EXT_QUEUE_BIT; writew(priv(dev)->hw_config_bits, priv(dev)->main_iobase + HW_CONFIG_REG); /* clear DAC buffer to prevent weird interactions */ writew(0, 
priv(dev)->main_iobase + DAC_BUFFER_CLEAR_REG); /* clear queue pointer */ writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG); /* load external queue */ for (i = 0; i < cmd->chanlist_len; i++) { bits = 0; /* set channel */ bits |= adc_chan_bits(CR_CHAN(cmd->chanlist[i])); /* set gain */ bits |= ai_range_bits_6xxx(dev, CR_RANGE(cmd-> chanlist [i])); /* set single-ended / differential */ bits |= se_diff_bit_6xxx(dev, CR_AREF(cmd-> chanlist[i]) == AREF_DIFF); if (CR_AREF(cmd->chanlist[i]) == AREF_COMMON) bits |= ADC_COMMON_BIT; /* mark end of queue */ if (i == cmd->chanlist_len - 1) bits |= QUEUE_EOSCAN_BIT | QUEUE_EOSEQ_BIT; writew(bits, priv(dev)->main_iobase + ADC_QUEUE_FIFO_REG); DEBUG_PRINT ("wrote 0x%x to external channel queue\n", bits); } /* doing a queue clear is not specified in board docs, * but required for reliable operation */ writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG); /* prime queue holding register */ writew(0, priv(dev)->main_iobase + ADC_QUEUE_LOAD_REG); } } else { unsigned short old_cal_range_bits = priv(dev)->i2c_cal_range_bits; priv(dev)->i2c_cal_range_bits &= ~ADC_SRC_4020_MASK; /* select BNC inputs */ priv(dev)->i2c_cal_range_bits |= adc_src_4020_bits(4); /* select ranges */ for (i = 0; i < cmd->chanlist_len; i++) { unsigned int channel = CR_CHAN(cmd->chanlist[i]); unsigned int range = CR_RANGE(cmd->chanlist[i]); if (range == 0) priv(dev)->i2c_cal_range_bits |= attenuate_bit(channel); else priv(dev)->i2c_cal_range_bits &= ~attenuate_bit(channel); } /* update calibration/range i2c register only if necessary, as it is very slow */ if (old_cal_range_bits != priv(dev)->i2c_cal_range_bits) { uint8_t i2c_data = priv(dev)->i2c_cal_range_bits; i2c_write(dev, RANGE_CAL_I2C_ADDR, &i2c_data, sizeof(i2c_data)); } } return 0; } static inline void load_first_dma_descriptor(struct comedi_device *dev, unsigned int dma_channel, unsigned int descriptor_bits) { /* The transfer size, pci address, and local address registers * are supposedly unused 
during chained dma, * but I have found that left over values from last operation * occasionally cause problems with transfer of first dma * block. Initializing them to zero seems to fix the problem. */ if (dma_channel) { writel(0, priv(dev)->plx9080_iobase + PLX_DMA1_TRANSFER_SIZE_REG); writel(0, priv(dev)->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG); writel(0, priv(dev)->plx9080_iobase + PLX_DMA1_LOCAL_ADDRESS_REG); writel(descriptor_bits, priv(dev)->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG); } else { writel(0, priv(dev)->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG); writel(0, priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG); writel(0, priv(dev)->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG); writel(descriptor_bits, priv(dev)->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG); } } static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; uint32_t bits; unsigned int i; unsigned long flags; int retval; disable_ai_pacing(dev); abort_dma(dev, 1); retval = setup_channel_queue(dev, cmd); if (retval < 0) return retval; /* make sure internal calibration source is turned off */ writew(0, priv(dev)->main_iobase + CALIBRATION_REG); set_ai_pacing(dev, cmd); setup_sample_counters(dev, cmd); enable_ai_interrupts(dev, cmd); spin_lock_irqsave(&dev->spinlock, flags); /* set mode, allow conversions through software gate */ priv(dev)->adc_control1_bits |= ADC_SW_GATE_BIT; priv(dev)->adc_control1_bits &= ~ADC_DITHER_BIT; if (board(dev)->layout != LAYOUT_4020) { priv(dev)->adc_control1_bits &= ~ADC_MODE_MASK; if (cmd->convert_src == TRIG_EXT) priv(dev)->adc_control1_bits |= adc_mode_bits(13); /* good old mode 13 */ else priv(dev)->adc_control1_bits |= adc_mode_bits(8); /* mode 8. What else could you need? 
*/ } else { priv(dev)->adc_control1_bits &= ~CHANNEL_MODE_4020_MASK; if (cmd->chanlist_len == 4) priv(dev)->adc_control1_bits |= FOUR_CHANNEL_4020_BITS; else if (cmd->chanlist_len == 2) priv(dev)->adc_control1_bits |= TWO_CHANNEL_4020_BITS; priv(dev)->adc_control1_bits &= ~ADC_LO_CHANNEL_4020_MASK; priv(dev)->adc_control1_bits |= adc_lo_chan_4020_bits(CR_CHAN(cmd->chanlist[0])); priv(dev)->adc_control1_bits &= ~ADC_HI_CHANNEL_4020_MASK; priv(dev)->adc_control1_bits |= adc_hi_chan_4020_bits(CR_CHAN (cmd-> chanlist[cmd->chanlist_len - 1])); } writew(priv(dev)->adc_control1_bits, priv(dev)->main_iobase + ADC_CONTROL1_REG); DEBUG_PRINT("control1 bits 0x%x\n", priv(dev)->adc_control1_bits); spin_unlock_irqrestore(&dev->spinlock, flags); /* clear adc buffer */ writew(0, priv(dev)->main_iobase + ADC_BUFFER_CLEAR_REG); if ((cmd->flags & TRIG_WAKE_EOS) == 0 || board(dev)->layout == LAYOUT_4020) { priv(dev)->ai_dma_index = 0; /* set dma transfer size */ for (i = 0; i < ai_dma_ring_count(board(dev)); i++) priv(dev)->ai_dma_desc[i].transfer_size = cpu_to_le32(dma_transfer_size(dev) * sizeof(uint16_t)); /* give location of first dma descriptor */ load_first_dma_descriptor(dev, 1, priv(dev)->ai_dma_desc_bus_addr | PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI); dma_start_sync(dev, 1); } if (board(dev)->layout == LAYOUT_4020) { /* set source for external triggers */ bits = 0; if (cmd->start_src == TRIG_EXT && CR_CHAN(cmd->start_arg)) bits |= EXT_START_TRIG_BNC_BIT; if (cmd->stop_src == TRIG_EXT && CR_CHAN(cmd->stop_arg)) bits |= EXT_STOP_TRIG_BNC_BIT; writew(bits, priv(dev)->main_iobase + DAQ_ATRIG_LOW_4020_REG); } spin_lock_irqsave(&dev->spinlock, flags); /* enable pacing, triggering, etc */ bits = ADC_ENABLE_BIT | ADC_SOFT_GATE_BITS | ADC_GATE_LEVEL_BIT; if (cmd->flags & TRIG_WAKE_EOS) bits |= ADC_DMA_DISABLE_BIT; /* set start trigger */ if (cmd->start_src == TRIG_EXT) { bits |= ADC_START_TRIG_EXT_BITS; if (cmd->start_arg & CR_INVERT) bits |= 
ADC_START_TRIG_FALLING_BIT; } else if (cmd->start_src == TRIG_NOW) bits |= ADC_START_TRIG_SOFT_BITS; if (use_hw_sample_counter(cmd)) bits |= ADC_SAMPLE_COUNTER_EN_BIT; writew(bits, priv(dev)->main_iobase + ADC_CONTROL0_REG); DEBUG_PRINT("control0 bits 0x%x\n", bits); priv(dev)->ai_cmd_running = 1; spin_unlock_irqrestore(&dev->spinlock, flags); /* start acquisition */ if (cmd->start_src == TRIG_NOW) { writew(0, priv(dev)->main_iobase + ADC_START_REG); DEBUG_PRINT("soft trig\n"); } return 0; } /* read num_samples from 16 bit wide ai fifo */ static void pio_drain_ai_fifo_16(struct comedi_device *dev) { struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i; uint16_t prepost_bits; int read_segment, read_index, write_segment, write_index; int num_samples; do { /* get least significant 15 bits */ read_index = readw(priv(dev)->main_iobase + ADC_READ_PNTR_REG) & 0x7fff; write_index = readw(priv(dev)->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff; /* Get most significant bits (grey code). Different boards use different code * so use a scheme that doesn't depend on encoding. This read must * occur after reading least significant 15 bits to avoid race * with fifo switching to next segment. 
*/ prepost_bits = readw(priv(dev)->main_iobase + PREPOST_REG); /* if read and write pointers are not on the same fifo segment, read to the * end of the read segment */ read_segment = adc_upper_read_ptr_code(prepost_bits); write_segment = adc_upper_write_ptr_code(prepost_bits); DEBUG_PRINT(" rd seg %i, wrt seg %i, rd idx %i, wrt idx %i\n", read_segment, write_segment, read_index, write_index); if (read_segment != write_segment) num_samples = priv(dev)->ai_fifo_segment_length - read_index; else num_samples = write_index - read_index; if (cmd->stop_src == TRIG_COUNT) { if (priv(dev)->ai_count == 0) break; if (num_samples > priv(dev)->ai_count) num_samples = priv(dev)->ai_count; priv(dev)->ai_count -= num_samples; } if (num_samples < 0) { dev_err(dev->hw_dev, "cb_pcidas64: bug! num_samples < 0\n"); break; } DEBUG_PRINT(" read %i samples from fifo\n", num_samples); for (i = 0; i < num_samples; i++) { cfc_write_to_buffer(s, readw(priv(dev)->main_iobase + ADC_FIFO_REG)); } } while (read_segment != write_segment); } /* Read from 32 bit wide ai fifo of 4020 - deal with insane grey coding of pointers. * The pci-4020 hardware only supports * dma transfers (it only supports the use of pio for draining the last remaining * points from the fifo when a data acquisition operation has completed). 
*/ static void pio_drain_ai_fifo_32(struct comedi_device *dev) { struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i; unsigned int max_transfer = 100000; uint32_t fifo_data; int write_code = readw(priv(dev)->main_iobase + ADC_WRITE_PNTR_REG) & 0x7fff; int read_code = readw(priv(dev)->main_iobase + ADC_READ_PNTR_REG) & 0x7fff; if (cmd->stop_src == TRIG_COUNT) { if (max_transfer > priv(dev)->ai_count) max_transfer = priv(dev)->ai_count; } for (i = 0; read_code != write_code && i < max_transfer;) { fifo_data = readl(priv(dev)->dio_counter_iobase + ADC_FIFO_REG); cfc_write_to_buffer(s, fifo_data & 0xffff); i++; if (i < max_transfer) { cfc_write_to_buffer(s, (fifo_data >> 16) & 0xffff); i++; } read_code = readw(priv(dev)->main_iobase + ADC_READ_PNTR_REG) & 0x7fff; } priv(dev)->ai_count -= i; } /* empty fifo */ static void pio_drain_ai_fifo(struct comedi_device *dev) { if (board(dev)->layout == LAYOUT_4020) pio_drain_ai_fifo_32(dev); else pio_drain_ai_fifo_16(dev); } static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel) { struct comedi_async *async = dev->read_subdev->async; uint32_t next_transfer_addr; int j; int num_samples = 0; void __iomem *pci_addr_reg; if (channel) pci_addr_reg = priv(dev)->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG; else pci_addr_reg = priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG; /* loop until we have read all the full buffers */ for (j = 0, next_transfer_addr = readl(pci_addr_reg); (next_transfer_addr < priv(dev)->ai_buffer_bus_addr[priv(dev)->ai_dma_index] || next_transfer_addr >= priv(dev)->ai_buffer_bus_addr[priv(dev)->ai_dma_index] + DMA_BUFFER_SIZE) && j < ai_dma_ring_count(board(dev)); j++) { /* transfer data from dma buffer to comedi buffer */ num_samples = dma_transfer_size(dev); if (async->cmd.stop_src == TRIG_COUNT) { if (num_samples > priv(dev)->ai_count) num_samples = priv(dev)->ai_count; priv(dev)->ai_count -= 
num_samples; } cfc_write_array_to_buffer(dev->read_subdev, priv(dev)->ai_buffer[priv(dev)-> ai_dma_index], num_samples * sizeof(uint16_t)); priv(dev)->ai_dma_index = (priv(dev)->ai_dma_index + 1) % ai_dma_ring_count(board(dev)); DEBUG_PRINT("next buffer addr 0x%lx\n", (unsigned long)priv(dev)-> ai_buffer_bus_addr[priv(dev)->ai_dma_index]); DEBUG_PRINT("pci addr reg 0x%x\n", next_transfer_addr); } /* XXX check for dma ring buffer overrun (use end-of-chain bit to mark last * unused buffer) */ } static void handle_ai_interrupt(struct comedi_device *dev, unsigned short status, unsigned int plx_status) { struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; uint8_t dma1_status; unsigned long flags; /* check for fifo overrun */ if (status & ADC_OVERRUN_BIT) { comedi_error(dev, "fifo overrun"); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; } /* spin lock makes sure no one else changes plx dma control reg */ spin_lock_irqsave(&dev->spinlock, flags); dma1_status = readb(priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG); if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */ writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT, priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG); DEBUG_PRINT("dma1 status 0x%x\n", dma1_status); if (dma1_status & PLX_DMA_EN_BIT) drain_dma_buffers(dev, 1); DEBUG_PRINT(" cleared dma ch1 interrupt\n"); } spin_unlock_irqrestore(&dev->spinlock, flags); if (status & ADC_DONE_BIT) DEBUG_PRINT("adc done interrupt\n"); /* drain fifo with pio */ if ((status & ADC_DONE_BIT) || ((cmd->flags & TRIG_WAKE_EOS) && (status & ADC_INTR_PENDING_BIT) && (board(dev)->layout != LAYOUT_4020))) { DEBUG_PRINT("pio fifo drain\n"); spin_lock_irqsave(&dev->spinlock, flags); if (priv(dev)->ai_cmd_running) { spin_unlock_irqrestore(&dev->spinlock, flags); pio_drain_ai_fifo(dev); } else spin_unlock_irqrestore(&dev->spinlock, flags); } /* if we are have all the data, then quit */ if ((cmd->stop_src == 
	    TRIG_COUNT && (int)priv(dev)->ai_count <= 0) ||
	    (cmd->stop_src == TRIG_EXT && (status & ADC_STOP_BIT))) {
		async->events |= COMEDI_CB_EOA;
	}

	cfc_handle_events(dev, s);
}

/*
 * Index of the analog output dma buffer that was loaded before the
 * current one (ring of AO_DMA_RING_COUNT buffers).
 */
static inline unsigned int prev_ao_dma_index(struct comedi_device *dev)
{
	unsigned int buffer_index;

	if (priv(dev)->ao_dma_index == 0)
		buffer_index = AO_DMA_RING_COUNT - 1;
	else
		buffer_index = priv(dev)->ao_dma_index - 1;
	return buffer_index;
}

/*
 * Returns nonzero if the most recently loaded ao dma buffer has finished
 * transferring: dma channel 0 reports done and its pci address register
 * points at that buffer.
 */
static int last_ao_dma_load_completed(struct comedi_device *dev)
{
	unsigned int buffer_index;
	unsigned int transfer_address;
	unsigned short dma_status;

	buffer_index = prev_ao_dma_index(dev);
	dma_status = readb(priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG);
	if ((dma_status & PLX_DMA_DONE_BIT) == 0)
		return 0;

	transfer_address =
		readl(priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
	if (transfer_address != priv(dev)->ao_buffer_bus_addr[buffer_index])
		return 0;

	return 1;
}

/*
 * Returns nonzero if the analog output stopped before completing normally
 * (i.e. a buffer underrun for TRIG_NONE, or an incomplete TRIG_COUNT run).
 */
static int ao_stopped_by_error(struct comedi_device *dev,
			       const struct comedi_cmd *cmd)
{
	if (cmd->stop_src == TRIG_NONE)
		return 1;
	if (cmd->stop_src == TRIG_COUNT) {
		if (priv(dev)->ao_count)
			return 1;
		if (last_ao_dma_load_completed(dev) == 0)
			return 1;
	}
	return 0;
}

/*
 * Returns nonzero if the ao dma chain hit its end-of-chain marker (done,
 * still enabled) before the final buffer load completed, meaning the dma
 * needs to be restarted to recover.
 */
static inline int ao_dma_needs_restart(struct comedi_device *dev,
				       unsigned short dma_status)
{
	if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
	    (dma_status & PLX_DMA_EN_BIT) == 0)
		return 0;
	if (last_ao_dma_load_completed(dev))
		return 0;
	return 1;
}

/*
 * Restart ao dma from the descriptor it stopped on, clearing the
 * end-of-chain bit so the chain continues.
 */
static void restart_ao_dma(struct comedi_device *dev)
{
	unsigned int dma_desc_bits;

	dma_desc_bits =
		readl(priv(dev)->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
	dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
	DEBUG_PRINT("restarting ao dma, descriptor reg 0x%x\n", dma_desc_bits);
	load_first_dma_descriptor(dev, 0, dma_desc_bits);
	dma_start_sync(dev, 0);
}

/*
 * Analog output side of the interrupt handler: acknowledge dma channel 0
 * interrupts, keep the dma ring loaded, recover from end-of-chain events,
 * and signal end-of-acquisition / error to the comedi core.
 */
static void handle_ao_interrupt(struct comedi_device *dev,
				unsigned short status, unsigned int plx_status)
{
	struct comedi_subdevice *s = dev->write_subdev;
	struct comedi_async *async;
	struct comedi_cmd *cmd;
	uint8_t dma0_status;
	unsigned long
flags; /* board might not support ao, in which case write_subdev is NULL */ if (s == NULL) return; async = s->async; cmd = &async->cmd; /* spin lock makes sure no one else changes plx dma control reg */ spin_lock_irqsave(&dev->spinlock, flags); dma0_status = readb(priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG); if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */ if ((dma0_status & PLX_DMA_EN_BIT) && !(dma0_status & PLX_DMA_DONE_BIT)) writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT, priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG); else writeb(PLX_CLEAR_DMA_INTR_BIT, priv(dev)->plx9080_iobase + PLX_DMA0_CS_REG); spin_unlock_irqrestore(&dev->spinlock, flags); DEBUG_PRINT("dma0 status 0x%x\n", dma0_status); if (dma0_status & PLX_DMA_EN_BIT) { load_ao_dma(dev, cmd); /* try to recover from dma end-of-chain event */ if (ao_dma_needs_restart(dev, dma0_status)) restart_ao_dma(dev); } DEBUG_PRINT(" cleared dma ch0 interrupt\n"); } else spin_unlock_irqrestore(&dev->spinlock, flags); if ((status & DAC_DONE_BIT)) { async->events |= COMEDI_CB_EOA; if (ao_stopped_by_error(dev, cmd)) async->events |= COMEDI_CB_ERROR; DEBUG_PRINT("plx dma0 desc reg 0x%x\n", readl(priv(dev)->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG)); DEBUG_PRINT("plx dma0 address reg 0x%x\n", readl(priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG)); } cfc_handle_events(dev, s); } static irqreturn_t handle_interrupt(int irq, void *d) { struct comedi_device *dev = d; unsigned short status; uint32_t plx_status; uint32_t plx_bits; plx_status = readl(priv(dev)->plx9080_iobase + PLX_INTRCS_REG); status = readw(priv(dev)->main_iobase + HW_STATUS_REG); DEBUG_PRINT("cb_pcidas64: hw status 0x%x ", status); DEBUG_PRINT("plx status 0x%x\n", plx_status); /* an interrupt before all the postconfig stuff gets done could * cause a NULL dereference if we continue through the * interrupt handler */ if (dev->attached == 0) { DEBUG_PRINT("cb_pcidas64: premature interrupt, ignoring", status); return IRQ_HANDLED; } 
handle_ai_interrupt(dev, status, plx_status); handle_ao_interrupt(dev, status, plx_status); /* clear possible plx9080 interrupt sources */ if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */ plx_bits = readl(priv(dev)->plx9080_iobase + PLX_DBR_OUT_REG); writel(plx_bits, priv(dev)->plx9080_iobase + PLX_DBR_OUT_REG); DEBUG_PRINT(" cleared local doorbell bits 0x%x\n", plx_bits); } DEBUG_PRINT("exiting handler\n"); return IRQ_HANDLED; } static void abort_dma(struct comedi_device *dev, unsigned int channel) { unsigned long flags; /* spinlock for plx dma control/status reg */ spin_lock_irqsave(&dev->spinlock, flags); plx9080_abort_dma(priv(dev)->plx9080_iobase, channel); spin_unlock_irqrestore(&dev->spinlock, flags); } static int ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); if (priv(dev)->ai_cmd_running == 0) { spin_unlock_irqrestore(&dev->spinlock, flags); return 0; } priv(dev)->ai_cmd_running = 0; spin_unlock_irqrestore(&dev->spinlock, flags); disable_ai_pacing(dev); abort_dma(dev, 1); DEBUG_PRINT("ai canceled\n"); return 0; } static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); int range = CR_RANGE(insn->chanspec); /* do some initializing */ writew(0, priv(dev)->main_iobase + DAC_CONTROL0_REG); /* set range */ set_dac_range_bits(dev, &priv(dev)->dac_control1_bits, chan, range); writew(priv(dev)->dac_control1_bits, priv(dev)->main_iobase + DAC_CONTROL1_REG); /* write to channel */ if (board(dev)->layout == LAYOUT_4020) { writew(data[0] & 0xff, priv(dev)->main_iobase + dac_lsb_4020_reg(chan)); writew((data[0] >> 8) & 0xf, priv(dev)->main_iobase + dac_msb_4020_reg(chan)); } else { writew(data[0], priv(dev)->main_iobase + dac_convert_reg(chan)); } /* remember output value */ priv(dev)->ao_value[chan] = data[0]; return 1; } static int ao_readback_insn(struct comedi_device 
*dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = priv(dev)->ao_value[CR_CHAN(insn->chanspec)]; return 1; } static void set_dac_control0_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int bits = DAC_ENABLE_BIT | WAVEFORM_GATE_LEVEL_BIT | WAVEFORM_GATE_ENABLE_BIT | WAVEFORM_GATE_SELECT_BIT; if (cmd->start_src == TRIG_EXT) { bits |= WAVEFORM_TRIG_EXT_BITS; if (cmd->start_arg & CR_INVERT) bits |= WAVEFORM_TRIG_FALLING_BIT; } else { bits |= WAVEFORM_TRIG_SOFT_BITS; } if (cmd->scan_begin_src == TRIG_EXT) { bits |= DAC_EXT_UPDATE_ENABLE_BIT; if (cmd->scan_begin_arg & CR_INVERT) bits |= DAC_EXT_UPDATE_FALLING_BIT; } writew(bits, priv(dev)->main_iobase + DAC_CONTROL0_REG); } static void set_dac_control1_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { int i; for (i = 0; i < cmd->chanlist_len; i++) { int channel, range; channel = CR_CHAN(cmd->chanlist[i]); range = CR_RANGE(cmd->chanlist[i]); set_dac_range_bits(dev, &priv(dev)->dac_control1_bits, channel, range); } priv(dev)->dac_control1_bits |= DAC_SW_GATE_BIT; writew(priv(dev)->dac_control1_bits, priv(dev)->main_iobase + DAC_CONTROL1_REG); } static void set_dac_select_reg(struct comedi_device *dev, const struct comedi_cmd *cmd) { uint16_t bits; unsigned int first_channel, last_channel; first_channel = CR_CHAN(cmd->chanlist[0]); last_channel = CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]); if (last_channel < first_channel) comedi_error(dev, "bug! last ao channel < first ao channel"); bits = (first_channel & 0x7) | (last_channel & 0x7) << 3; writew(bits, priv(dev)->main_iobase + DAC_SELECT_REG); } static void set_dac_interval_regs(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int divisor; if (cmd->scan_begin_src != TRIG_TIMER) return; divisor = get_ao_divisor(cmd->scan_begin_arg, cmd->flags); if (divisor > max_counter_value) { comedi_error(dev, "bug! 
ao divisor too big"); divisor = max_counter_value; } writew(divisor & 0xffff, priv(dev)->main_iobase + DAC_SAMPLE_INTERVAL_LOWER_REG); writew((divisor >> 16) & 0xff, priv(dev)->main_iobase + DAC_SAMPLE_INTERVAL_UPPER_REG); } static unsigned int load_ao_dma_buffer(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes, buffer_index, prev_buffer_index; unsigned int next_bits; buffer_index = priv(dev)->ao_dma_index; prev_buffer_index = prev_ao_dma_index(dev); DEBUG_PRINT("attempting to load ao buffer %i (0x%x)\n", buffer_index, priv(dev)->ao_buffer_bus_addr[buffer_index]); num_bytes = comedi_buf_read_n_available(dev->write_subdev->async); if (num_bytes > DMA_BUFFER_SIZE) num_bytes = DMA_BUFFER_SIZE; if (cmd->stop_src == TRIG_COUNT && num_bytes > priv(dev)->ao_count) num_bytes = priv(dev)->ao_count; num_bytes -= num_bytes % bytes_in_sample; if (num_bytes == 0) return 0; DEBUG_PRINT("loading %i bytes\n", num_bytes); num_bytes = cfc_read_array_from_buffer(dev->write_subdev, priv(dev)-> ao_buffer[buffer_index], num_bytes); priv(dev)->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(num_bytes); /* set end of chain bit so we catch underruns */ next_bits = le32_to_cpu(priv(dev)->ao_dma_desc[buffer_index].next); next_bits |= PLX_END_OF_CHAIN_BIT; priv(dev)->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits); /* clear end of chain bit on previous buffer now that we have set it * for the last buffer */ next_bits = le32_to_cpu(priv(dev)->ao_dma_desc[prev_buffer_index].next); next_bits &= ~PLX_END_OF_CHAIN_BIT; priv(dev)->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits); priv(dev)->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT; priv(dev)->ao_count -= num_bytes; return num_bytes; } static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes; unsigned int next_transfer_addr; void __iomem *pci_addr_reg = priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG; unsigned int 
buffer_index; do { buffer_index = priv(dev)->ao_dma_index; /* don't overwrite data that hasn't been transferred yet */ next_transfer_addr = readl(pci_addr_reg); if (next_transfer_addr >= priv(dev)->ao_buffer_bus_addr[buffer_index] && next_transfer_addr < priv(dev)->ao_buffer_bus_addr[buffer_index] + DMA_BUFFER_SIZE) return; num_bytes = load_ao_dma_buffer(dev, cmd); } while (num_bytes >= DMA_BUFFER_SIZE); } static int prep_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd) { unsigned int num_bytes; int i; /* clear queue pointer too, since external queue has * weird interactions with ao fifo */ writew(0, priv(dev)->main_iobase + ADC_QUEUE_CLEAR_REG); writew(0, priv(dev)->main_iobase + DAC_BUFFER_CLEAR_REG); num_bytes = (DAC_FIFO_SIZE / 2) * bytes_in_sample; if (cmd->stop_src == TRIG_COUNT && num_bytes / bytes_in_sample > priv(dev)->ao_count) num_bytes = priv(dev)->ao_count * bytes_in_sample; num_bytes = cfc_read_array_from_buffer(dev->write_subdev, priv(dev)->ao_bounce_buffer, num_bytes); for (i = 0; i < num_bytes / bytes_in_sample; i++) { writew(priv(dev)->ao_bounce_buffer[i], priv(dev)->main_iobase + DAC_FIFO_REG); } priv(dev)->ao_count -= num_bytes / bytes_in_sample; if (cmd->stop_src == TRIG_COUNT && priv(dev)->ao_count == 0) return 0; num_bytes = load_ao_dma_buffer(dev, cmd); if (num_bytes == 0) return -1; if (num_bytes >= DMA_BUFFER_SIZE) ; load_ao_dma(dev, cmd); dma_start_sync(dev, 0); return 0; } static inline int external_ai_queue_in_use(struct comedi_device *dev) { if (dev->read_subdev->busy) return 0; if (board(dev)->layout == LAYOUT_4020) return 0; else if (use_internal_queue_6xxx(&dev->read_subdev->async->cmd)) return 0; return 1; } static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; if (external_ai_queue_in_use(dev)) { warn_external_queue(dev); return -EBUSY; } /* disable analog output system during setup */ writew(0x0, priv(dev)->main_iobase + DAC_CONTROL0_REG); 
priv(dev)->ao_dma_index = 0; priv(dev)->ao_count = cmd->stop_arg * cmd->chanlist_len; set_dac_select_reg(dev, cmd); set_dac_interval_regs(dev, cmd); load_first_dma_descriptor(dev, 0, priv(dev)->ao_dma_desc_bus_addr | PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT); set_dac_control1_reg(dev, cmd); s->async->inttrig = ao_inttrig; return 0; } static int ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct comedi_cmd *cmd = &s->async->cmd; int retval; if (trig_num != 0) return -EINVAL; retval = prep_ao_dma(dev, cmd); if (retval < 0) return -EPIPE; set_dac_control0_reg(dev, cmd); if (cmd->start_src == TRIG_INT) writew(0, priv(dev)->main_iobase + DAC_START_REG); s->async->inttrig = NULL; return 0; } static int ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; unsigned int tmp_arg; int i; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ /* uniqueness check */ if (cmd->start_src != TRIG_INT && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; /* compatibility check */ if (cmd->convert_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT) 
/* Tail of the analog-output cmdtest (the function begins before this
 * chunk): comedi command checking, steps 2-5.  Returns the step number
 * at which a problem was found, or 0 if the command is acceptable. */
err++;
if (err)
	return 2;

/* step 3: make sure arguments are trivially compatible */
if (cmd->scan_begin_src == TRIG_TIMER) {
	/* clamp update period to the board's minimum */
	if (cmd->scan_begin_arg < board(dev)->ao_scan_speed) {
		cmd->scan_begin_arg = board(dev)->ao_scan_speed;
		err++;
	}
	/* divisor must fit in the hardware counter */
	if (get_ao_divisor(cmd->scan_begin_arg, cmd->flags) >
	    max_counter_value) {
		cmd->scan_begin_arg = (max_counter_value + 2) * TIMER_BASE;
		err++;
	}
}
if (!cmd->chanlist_len) {
	cmd->chanlist_len = 1;
	err++;
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
	cmd->scan_end_arg = cmd->chanlist_len;
	err++;
}
if (err)
	return 3;

/* step 4: fix up any arguments */
if (cmd->scan_begin_src == TRIG_TIMER) {
	tmp_arg = cmd->scan_begin_arg;
	/* round to the nearest achievable period */
	cmd->scan_begin_arg = get_divisor(cmd->scan_begin_arg, cmd->flags) *
	    TIMER_BASE;
	if (tmp_arg != cmd->scan_begin_arg)
		err++;
}
if (err)
	return 4;

/* step 5: the chanlist must use consecutive channels */
if (cmd->chanlist) {
	unsigned int first_channel = CR_CHAN(cmd->chanlist[0]);
	for (i = 1; i < cmd->chanlist_len; i++) {
		if (CR_CHAN(cmd->chanlist[i]) != first_channel + i) {
			comedi_error(dev,
				     "chanlist must use consecutive channels");
			err++;
			break;
		}
	}
}
if (err)
	return 5;

return 0;
}

/* Halt an analog output command: clear DAC control and abort DMA. */
static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
	writew(0x0, priv(dev)->main_iobase + DAC_CONTROL0_REG);
	abort_dma(dev, 0);
	return 0;
}

/* dio access callback: byte-wide registers.
 * dir != 0 means write 'data' to 'port'; dir == 0 means read 'port'. */
static int dio_callback(int dir, int port, int data, unsigned long arg)
{
	void __iomem *iobase = (void __iomem *)arg;
	if (dir) {
		writeb(data, iobase + port);
		DEBUG_PRINT("wrote 0x%x to port %i\n", data, port);
		return 0;
	} else {
		return readb(iobase + port);
	}
}

/* dio access callback for the 4020: registers are 16 bits wide and
 * spaced two bytes apart, hence the 2 * port addressing. */
static int dio_callback_4020(int dir, int port, int data, unsigned long arg)
{
	void __iomem *iobase = (void __iomem *)arg;
	if (dir) {
		writew(data, iobase + 2 * port);
		return 0;
	} else {
		return readw(iobase + 2 * port);
	}
}

/* insn_bits handler: read the four digital input lines. */
static int di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
		    struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bits;

	bits = readb(priv(dev)->dio_counter_iobase + DI_REG);
	bits &= 0xf;	/* only 4 input lines */
	data[1] = bits;
	data[0] = 0;

	return 2;
}

/* insn_bits handler: update the four digital output lines.
 * data[0] is a mask of bits to change, data[1] holds the new values. */
static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
		    struct comedi_insn *insn, unsigned int *data)
{
	data[0] &= 0xf;	/* only 4 output lines */
	/* zero bits we are going to change */
	s->state &= ~data[0];
	/* set new bits */
	s->state |= data[0] & data[1];

	writeb(s->state, priv(dev)->dio_counter_iobase + DO_REG);

	data[1] = s->state;

	return 2;
}

/* insn_config handler: set a 60xx dio line's direction, or report it. */
static int dio_60xx_config_insn(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask;

	mask = 1 << CR_CHAN(insn->chanspec);

	switch (data[0]) {
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~mask;
		break;
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= mask;
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
		return 2;
	default:
		return -EINVAL;
	}

	writeb(s->io_bits,
	       priv(dev)->dio_counter_iobase + DIO_DIRECTION_60XX_REG);

	return 1;
}

/* insn_bits handler for the 60xx dio lines: optionally update masked
 * bits, then read back the data register. */
static int dio_60xx_wbits(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		writeb(s->state,
		       priv(dev)->dio_counter_iobase + DIO_DATA_60XX_REG);
	}

	data[1] = readb(priv(dev)->dio_counter_iobase + DIO_DATA_60XX_REG);

	return 2;
}

/* Cache a caldac value and program it, dispatching on board layout. */
static void caldac_write(struct comedi_device *dev, unsigned int channel,
			 unsigned int value)
{
	priv(dev)->caldac_state[channel] = value;

	switch (board(dev)->layout) {
	case LAYOUT_60XX:
	case LAYOUT_64XX:
		caldac_8800_write(dev, channel, value);
		break;
	case LAYOUT_4020:
		caldac_i2c_write(dev, channel, value);
		break;
	default:
		break;
	}
}

/* insn_write handler for the calibration dacs. */
static int calib_write_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	int channel = CR_CHAN(insn->chanspec);

	/* return immediately if setting hasn't changed, since
	 * programming these things is slow */
	if (priv(dev)->caldac_state[channel] == data[0])
		return 1;

	caldac_write(dev, channel, data[0]);

	return 1;
}

/* insn_read handler: report the cached caldac setting. */
static int calib_read_insn(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn, unsigned int *data)
{
	unsigned int channel = CR_CHAN(insn->chanspec);

	data[0] = priv(dev)->caldac_state[channel];

	return 1;
}

/* Bit-bang one channel of the ad8402 trimpot through the CALIBRATION
 * register: 10 bit stream (2 bit channel + 8 bit value), msb first,
 * each bit clocked by raising SERIAL_CLOCK_BIT. */
static void ad8402_write(struct comedi_device *dev, unsigned int channel,
			 unsigned int value)
{
	static const int bitstream_length = 10;
	unsigned int bit, register_bits;
	unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
	static const int ad8402_udelay = 1;

	priv(dev)->ad8402_state[channel] = value;

	register_bits = SELECT_8402_64XX_BIT;
	udelay(ad8402_udelay);
	writew(register_bits, priv(dev)->main_iobase + CALIBRATION_REG);

	for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
		if (bitstream & bit)
			register_bits |= SERIAL_DATA_IN_BIT;
		else
			register_bits &= ~SERIAL_DATA_IN_BIT;
		udelay(ad8402_udelay);
		writew(register_bits,
		       priv(dev)->main_iobase + CALIBRATION_REG);
		udelay(ad8402_udelay);
		writew(register_bits | SERIAL_CLOCK_BIT,
		       priv(dev)->main_iobase + CALIBRATION_REG);
	}

	udelay(ad8402_udelay);
	writew(0, priv(dev)->main_iobase + CALIBRATION_REG);
}

/* for pci-das6402/16, channel 0 is analog input gain and channel 1 is offset */
static int ad8402_write_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	int channel = CR_CHAN(insn->chanspec);

	/* return immediately if setting hasn't changed, since
	 * programming these things is slow */
	if (priv(dev)->ad8402_state[channel] == data[0])
		return 1;

	/* NOTE(review): ad8402_write() also stores the value into
	 * ad8402_state[], so this assignment looks redundant. */
	priv(dev)->ad8402_state[channel] = data[0];

	ad8402_write(dev, channel, data[0]);

	return 1;
}

/* insn_read handler: report the cached ad8402 setting. */
static int ad8402_read_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	unsigned int channel = CR_CHAN(insn->chanspec);

	data[0] = priv(dev)->ad8402_state[channel];

	return 1;
}

/* Read one 16 bit word from the plx9080 serial eeprom by bit-banging
 * the EE_* bits of the PLX control register: shift out an 11 bit
 * read command + address, then clock in the 16 bit value. */
static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
{
	static const int bitstream_length = 11;
	static const int read_command = 0x6;
	unsigned int bitstream = (read_command << 8) | address;
	unsigned int bit;
	void __iomem *const plx_control_addr =
	    priv(dev)->plx9080_iobase + PLX_CONTROL_REG;
	uint16_t value;
	static const int value_length = 16;
	static const int eeprom_udelay = 1;

	udelay(eeprom_udelay);
	priv(dev)->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS;
	/* make sure we don't send anything to the i2c bus on 4020 */
	priv(dev)->plx_control_bits |= CTL_USERO;
	writel(priv(dev)->plx_control_bits, plx_control_addr);
	/* activate serial eeprom */
	udelay(eeprom_udelay);
	priv(dev)->plx_control_bits |= CTL_EE_CS;
	writel(priv(dev)->plx_control_bits, plx_control_addr);

	/* write read command and desired memory address */
	for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
		/* set bit to be written */
		udelay(eeprom_udelay);
		if (bitstream & bit)
			priv(dev)->plx_control_bits |= CTL_EE_W;
		else
			priv(dev)->plx_control_bits &= ~CTL_EE_W;
		writel(priv(dev)->plx_control_bits, plx_control_addr);
		/* clock in bit */
		udelay(eeprom_udelay);
		priv(dev)->plx_control_bits |= CTL_EE_CLK;
		writel(priv(dev)->plx_control_bits, plx_control_addr);
		udelay(eeprom_udelay);
		priv(dev)->plx_control_bits &= ~CTL_EE_CLK;
		writel(priv(dev)->plx_control_bits, plx_control_addr);
	}
	/* read back value from eeprom memory location */
	value = 0;
	for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
		/* clock out bit */
		udelay(eeprom_udelay);
		priv(dev)->plx_control_bits |= CTL_EE_CLK;
		writel(priv(dev)->plx_control_bits, plx_control_addr);
		udelay(eeprom_udelay);
		priv(dev)->plx_control_bits &= ~CTL_EE_CLK;
		writel(priv(dev)->plx_control_bits, plx_control_addr);
		udelay(eeprom_udelay);
		if (readl(plx_control_addr) & CTL_EE_R)
			value |= bit;
	}

	/* deactivate eeprom serial input */
	udelay(eeprom_udelay);
	priv(dev)->plx_control_bits &= ~CTL_EE_CS;
	writel(priv(dev)->plx_control_bits, plx_control_addr);

	return value;
}

/* insn_read handler for the board eeprom. */
static int eeprom_read_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	data[0] = read_eeprom(dev, CR_CHAN(insn->chanspec));

	return 1;
}

/* utility
function that rounds desired timing to an achievable time, and
 * sets cmd members appropriately.
 * adc paces conversions from master clock by dividing by (x + 3)
 * where x is 24 bit number */
static void check_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd)
{
	unsigned int convert_divisor = 0, scan_divisor;
	static const int min_convert_divisor = 3;
	static const int max_convert_divisor =
	    max_counter_value + min_convert_divisor;
	static const int min_scan_divisor_4020 = 2;
	unsigned long long max_scan_divisor, min_scan_divisor;

	if (cmd->convert_src == TRIG_TIMER) {
		if (board(dev)->layout == LAYOUT_4020) {
			/* 4020: convert timing is not programmable,
			 * force arg to 0 */
			cmd->convert_arg = 0;
		} else {
			/* clamp divisor into hardware range, then round
			 * the period to what will actually be programmed */
			convert_divisor = get_divisor(cmd->convert_arg,
						      cmd->flags);
			if (convert_divisor > max_convert_divisor)
				convert_divisor = max_convert_divisor;
			if (convert_divisor < min_convert_divisor)
				convert_divisor = min_convert_divisor;
			cmd->convert_arg = convert_divisor * TIMER_BASE;
		}
	} else if (cmd->convert_src == TRIG_NOW)
		cmd->convert_arg = 0;

	if (cmd->scan_begin_src == TRIG_TIMER) {
		scan_divisor = get_divisor(cmd->scan_begin_arg, cmd->flags);
		if (cmd->convert_src == TRIG_TIMER) {
			/* XXX check for integer overflows */
			min_scan_divisor = convert_divisor * cmd->chanlist_len;
			max_scan_divisor =
			    (convert_divisor * cmd->chanlist_len - 1) +
			    max_counter_value;
		} else {
			min_scan_divisor = min_scan_divisor_4020;
			max_scan_divisor =
			    max_counter_value + min_scan_divisor;
		}
		if (scan_divisor > max_scan_divisor)
			scan_divisor = max_scan_divisor;
		if (scan_divisor < min_scan_divisor)
			scan_divisor = min_scan_divisor;
		cmd->scan_begin_arg = scan_divisor * TIMER_BASE;
	}

	return;
}

/* Gets nearest achievable timing given master clock speed, does not
 * take into account possible minimum/maximum divisor values.  Used
 * by other timing checking functions. */
static unsigned int get_divisor(unsigned int ns, unsigned int flags)
{
	unsigned int divisor;

	switch (flags & TRIG_ROUND_MASK) {
	case TRIG_ROUND_UP:
		divisor = (ns + TIMER_BASE - 1) / TIMER_BASE;
		break;
	case TRIG_ROUND_DOWN:
		divisor = ns / TIMER_BASE;
		break;
	case TRIG_ROUND_NEAREST:
	default:
		divisor = (ns + TIMER_BASE / 2) / TIMER_BASE;
		break;
	}

	return divisor;
}

/* Analog output divisor; the programmed value is 2 less than the
 * effective divisor (see the +2 correction in ao_cmdtest). */
static unsigned int get_ao_divisor(unsigned int ns, unsigned int flags)
{
	return get_divisor(ns, flags) - 2;
}

/* adjusts the size of hardware fifo (which determines block size for dma xfers) */
static int set_ai_fifo_size(struct comedi_device *dev,
			    unsigned int num_samples)
{
	unsigned int num_fifo_entries;
	int retval;
	const struct hw_fifo_info *const fifo = board(dev)->ai_fifo;

	num_fifo_entries = num_samples / fifo->sample_packing_ratio;

	retval = set_ai_fifo_segment_length(dev,
					    num_fifo_entries /
					    fifo->num_segments);
	if (retval < 0)
		return retval;

	/* report the size that was actually programmed */
	num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio;

	DEBUG_PRINT("set hardware fifo size to %i\n", num_samples);

	return num_samples;
}

/* query length of fifo */
static unsigned int ai_fifo_size(struct comedi_device *dev)
{
	return priv(dev)->ai_fifo_segment_length *
	    board(dev)->ai_fifo->num_segments *
	    board(dev)->ai_fifo->sample_packing_ratio;
}

/* Program the length of one fifo segment, rounded to the hardware's
 * 0x100-entry granularity; returns the length actually programmed. */
static int set_ai_fifo_segment_length(struct comedi_device *dev,
				      unsigned int num_entries)
{
	static const int increment_size = 0x100;
	const struct hw_fifo_info *const fifo = board(dev)->ai_fifo;
	unsigned int num_increments;
	uint16_t bits;

	if (num_entries < increment_size)
		num_entries = increment_size;
	if (num_entries > fifo->max_segment_length)
		num_entries = fifo->max_segment_length;

	/* 1 == 256 entries, 2 == 512 entries, etc */
	num_increments = (num_entries + increment_size / 2) / increment_size;

	bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask;
	priv(dev)->fifo_size_bits &= ~fifo->fifo_size_reg_mask;
	priv(dev)->fifo_size_bits |= bits;
	writew(priv(dev)->fifo_size_bits, priv(dev)->main_iobase
uint8_t serial_bytes[3]; uint8_t i2c_addr; enum pointer_bits { /* manual has gain and offset bits switched */ OFFSET_0_2 = 0x1, GAIN_0_2 = 0x2, OFFSET_1_3 = 0x4, GAIN_1_3 = 0x8, }; enum data_bits { NOT_CLEAR_REGISTERS = 0x20, }; switch (caldac_channel) { case 0: /* chan 0 offset */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = OFFSET_0_2; break; case 1: /* chan 1 offset */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = OFFSET_1_3; break; case 2: /* chan 2 offset */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = OFFSET_0_2; break; case 3: /* chan 3 offset */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = OFFSET_1_3; break; case 4: /* chan 0 gain */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = GAIN_0_2; break; case 5: /* chan 1 gain */ i2c_addr = CALDAC0_I2C_ADDR; serial_bytes[0] = GAIN_1_3; break; case 6: /* chan 2 gain */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = GAIN_0_2; break; case 7: /* chan 3 gain */ i2c_addr = CALDAC1_I2C_ADDR; serial_bytes[0] = GAIN_1_3; break; default: comedi_error(dev, "invalid caldac channel\n"); return -1; break; } serial_bytes[1] = NOT_CLEAR_REGISTERS | ((value >> 8) & 0xf); serial_bytes[2] = value & 0xff; i2c_write(dev, i2c_addr, serial_bytes, 3); return 0; } /* Their i2c requires a huge delay on setting clock or data high for some reason */ static const int i2c_high_udelay = 1000; static const int i2c_low_udelay = 10; /* set i2c data line high or low */ static void i2c_set_sda(struct comedi_device *dev, int state) { static const int data_bit = CTL_EE_W; void __iomem *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG; if (state) { /* set data line high */ priv(dev)->plx_control_bits &= ~data_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_high_udelay); } else { /* set data line low */ priv(dev)->plx_control_bits |= data_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_low_udelay); } } /* set i2c clock line high or low */ static void i2c_set_scl(struct comedi_device *dev, 
int state) { static const int clock_bit = CTL_USERO; void __iomem *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG; if (state) { /* set clock line high */ priv(dev)->plx_control_bits &= ~clock_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_high_udelay); } else { /* set clock line low */ priv(dev)->plx_control_bits |= clock_bit; writel(priv(dev)->plx_control_bits, plx_control_addr); udelay(i2c_low_udelay); } } static void i2c_write_byte(struct comedi_device *dev, uint8_t byte) { uint8_t bit; unsigned int num_bits = 8; DEBUG_PRINT("writing to i2c byte 0x%x\n", byte); for (bit = 1 << (num_bits - 1); bit; bit >>= 1) { i2c_set_scl(dev, 0); if ((byte & bit)) i2c_set_sda(dev, 1); else i2c_set_sda(dev, 0); i2c_set_scl(dev, 1); } } /* we can't really read the lines, so fake it */ static int i2c_read_ack(struct comedi_device *dev) { i2c_set_scl(dev, 0); i2c_set_sda(dev, 1); i2c_set_scl(dev, 1); return 0; /* return fake acknowledge bit */ } /* send start bit */ static void i2c_start(struct comedi_device *dev) { i2c_set_scl(dev, 1); i2c_set_sda(dev, 1); i2c_set_sda(dev, 0); } /* send stop bit */ static void i2c_stop(struct comedi_device *dev) { i2c_set_scl(dev, 0); i2c_set_sda(dev, 0); i2c_set_scl(dev, 1); i2c_set_sda(dev, 1); } static void i2c_write(struct comedi_device *dev, unsigned int address, const uint8_t * data, unsigned int length) { unsigned int i; uint8_t bitstream; static const int read_bit = 0x1; /* XXX need mutex to prevent simultaneous attempts to access eeprom and i2c bus */ /* make sure we dont send anything to eeprom */ priv(dev)->plx_control_bits &= ~CTL_EE_CS; i2c_stop(dev); i2c_start(dev); /* send address and write bit */ bitstream = (address << 1) & ~read_bit; i2c_write_byte(dev, bitstream); /* get acknowledge */ if (i2c_read_ack(dev) != 0) { comedi_error(dev, "i2c write failed: no acknowledge"); i2c_stop(dev); return; } /* write data bytes */ for (i = 0; i < length; i++) { i2c_write_byte(dev, data[i]); if 
(i2c_read_ack(dev) != 0) { comedi_error(dev, "i2c write failed: no acknowledge"); i2c_stop(dev); return; } } i2c_stop(dev); } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
yangmenghui/LinuxKernel3_19_V2X
drivers/infiniband/hw/ehca/ehca_eq.c
4295
5097
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * Event queue handling * * Authors: Waleri Fomin <fomin@de.ibm.com> * Khadija Souissi <souissi@de.ibm.com> * Reinhard Ernst <rernst@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_qes.h"
#include "hcp_if.h"
#include "ipz_pt_fn.h"

/* Allocate an event queue (EQ or NEQ) from the hypervisor, register its
 * pages, and hook up the interrupt handler and tasklet.
 * Returns 0 on success, -EINVAL on any failure (resources acquired so
 * far are released via the goto-cleanup labels). */
int ehca_create_eq(struct ehca_shca *shca,
		   struct ehca_eq *eq,
		   const enum ehca_eq_type type, const u32 length)
{
	int ret;
	u64 h_ret;
	u32 nr_pages;
	u32 i;
	void *vpage;
	struct ib_device *ib_dev = &shca->ib_device;

	spin_lock_init(&eq->spinlock);
	spin_lock_init(&eq->irq_spinlock);
	eq->is_initialized = 0;

	if (type != EHCA_EQ && type != EHCA_NEQ) {
		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
		return -EINVAL;
	}
	if (!length) {
		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
		return -EINVAL;
	}

	h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
					 &eq->pf,
					 type,
					 length,
					 &eq->ipz_eq_handle,
					 &eq->length,
					 &nr_pages, &eq->ist);

	if (h_ret != H_SUCCESS) {
		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
		return -EINVAL;
	}

	/* note: ipz_queue_ctor() returns nonzero on success,
	 * so !ret is the failure path */
	ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
	if (!ret) {
		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
		goto create_eq_exit1;
	}

	/* register each queue page with the hypervisor */
	for (i = 0; i < nr_pages; i++) {
		u64 rpage;

		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
		if (!vpage)
			goto create_eq_exit2;

		rpage = __pa(vpage);
		h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
						 eq->ipz_eq_handle,
						 &eq->pf,
						 0, 0, rpage, 1);

		if (i == (nr_pages - 1)) {
			/* last page: the page iterator must now be
			 * exhausted and the final register must succeed */
			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
			if (h_ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
		} else {
			if (h_ret != H_PAGE_REGISTERED)
				goto create_eq_exit2;
		}
	}

	ipz_qeit_reset(&eq->ipz_queue);

	/* register interrupt handlers and initialize work queues */
	if (type == EHCA_EQ) {
		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq,
			     (long)shca);

		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
					  0, "ehca_eq", (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");
	} else if (type == EHCA_NEQ) {
		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq,
			     (long)shca);

		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
					  0, "ehca_neq", (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");
	}

	eq->is_initialized = 1;

	return 0;

create_eq_exit2:
	ipz_queue_dtor(NULL, &eq->ipz_queue);

create_eq_exit1:
	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	return -EINVAL;
}

/* Pop the next valid event queue entry under the eq spinlock, or NULL
 * if the queue is empty. */
void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
	unsigned long flags;
	void *eqe;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

/* Tear down an event queue: free its irq, mark it uninitialized, kill
 * the tasklet, and release hypervisor and queue resources.
 * Returns 0 on success, -EINVAL if the hypervisor refuses to free. */
int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
	unsigned long flags;
	u64 h_ret;

	ibmebus_free_irq(eq->ist, (void *)shca);

	spin_lock_irqsave(&shca_list_lock, flags);
	eq->is_initialized = 0;
	spin_unlock_irqrestore(&shca_list_lock, flags);

	tasklet_kill(&eq->interrupt_task);

	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't free EQ resources.");
		return -EINVAL;
	}
	ipz_queue_dtor(NULL, &eq->ipz_queue);

	return 0;
}
gpl-2.0
MaxiCM-Test/android_kernel_lge_msm8226
drivers/net/ethernet/qlogic/qlge/qlge_main.c
4807
135228
/* * QLogic qlge NIC HBA Driver * Copyright (c) 2003-2008 QLogic Corporation * See LICENSE.qlge for copyright and licensing details. * Author: Linux qlge network device driver by * Ron Mercer <ron.mercer@qlogic.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/prefetch.h> #include <net/ip6_checksum.h> #include "qlge.h" char qlge_driver_name[] = DRV_NAME; const char qlge_driver_version[] = DRV_VERSION; MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>"); MODULE_DESCRIPTION(DRV_STRING " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | /* NETIF_MSG_TIMER | */ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR | /* NETIF_MSG_TX_QUEUED | */ /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ /* NETIF_MSG_PKTDATA | */ NETIF_MSG_HW | NETIF_MSG_WOL | 0; static int debug = -1; /* defaults above */ module_param(debug, int, 0664); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 static int qlge_irq_type = MSIX_IRQ; module_param(qlge_irq_type, int, 0664); 
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. "
		 "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. "
		 "Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev,
			    "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	/* read back: zero means we now own the semaphore */
	return !(ql_read32(qdev, SEM) & sem_bits);
}

/* Spin (up to 30 tries, 100us apart) until the hardware semaphore
 * is acquired; returns 0 on success, -ETIMEDOUT otherwise. */
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

/* Release a hardware semaphore previously taken with the above. */
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register. It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	/* DMA direction depends on whether the chip loads (reads)
	 * or dumps (writes) the control block */
	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			/* each entry is read as two (CAM: three) 32 bit
			 * words; every access waits for the MW/MR ready
			 * bits in MAC_ADDR_IDX */
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			/* split the 6 byte address into upper 16 and
			 * lower 32 bits */
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
*/ static int ql_set_mac_addr(struct ql_adapter *qdev, int set) { int status; char zero_mac_addr[ETH_ALEN]; char *addr; if (set) { addr = &qdev->current_mac_addr[0]; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Set Mac addr %pM\n", addr); } else { memset(zero_mac_addr, 0, ETH_ALEN); addr = &zero_mac_addr[0]; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Clearing MAC address\n"); } status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; status = ql_set_mac_addr_reg(qdev, (u8 *) addr, MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); if (status) netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); return status; } void ql_link_on(struct ql_adapter *qdev) { netif_err(qdev, link, qdev->ndev, "Link is up.\n"); netif_carrier_on(qdev->ndev); ql_set_mac_addr(qdev, 1); } void ql_link_off(struct ql_adapter *qdev) { netif_err(qdev, link, qdev->ndev, "Link is down.\n"); netif_carrier_off(qdev->ndev); ql_set_mac_addr(qdev, 0); } /* Get a specific frame routing value from the CAM. * Used for debug and reg dump. */ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) { int status = 0; status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); if (status) goto exit; ql_write32(qdev, RT_IDX, RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); if (status) goto exit; *value = ql_read32(qdev, RT_DATA); exit: return status; } /* The NIC function for this chip has 16 routing indexes. Each one can be used * to route different frame types to various inbound queues. We send broadcast/ * multicast/error frames to the default queue for slow handling, * and CAM hit/RSS frames to the fast handling queues. */ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, int enable) { int status = -EINVAL; /* Return error if no mask match. 
*/
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry.
			 */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

/* Globally enable the chip's interrupt output (upper 16 bits mask-enable). */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

/* Globally disable the chip's interrupt output. */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		/* Read STS to flush the posted write before returning. */
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
*/
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	/* Only write the disable mask on the 0 -> 1 transition of irq_cnt;
	 * nested callers just bump the count.
	 */
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

/* Verify the flash image: check the 4-byte signature against 'str' and
 * confirm the 16-bit one's-complement style checksum over 'size' words
 * sums to zero.  Returns 0 on success, non-zero otherwise.
 */
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return	status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

/* Read one 32-bit word from flash at 'offset' via the FLASH_ADDR/FLASH_DATA
 * indirect register pair.  The result is stored little-endian in *data.
 */
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

/* Read and validate the 8000-series flash parameter block, then install
 * the manufacturer (or BOFM-modified) MAC address on the netdev.
 */
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* Read and validate the 8012-series flash parameter block and install
 * its MAC address on the netdev.
 */
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
*/
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs.
*/
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	/* 64-bit stats regs are two consecutive 32-bit regs: low then high. */
	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo.
					 */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
			       MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;

	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Size in bytes of one master page block used for large rx buffers. */
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next large-buffer page chunk and sync it for CPU access. */
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
*/
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

/* Tell the hardware how far we have consumed the completion queue. */
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Hand out the next lbq_buf_size chunk of the ring's master page,
 * allocating and DMA-mapping a fresh master page when the previous
 * one has been fully carved up.  Returns 0 or -ENOMEM.
 */
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		/* Master page exhausted; the next call allocates a new one. */
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		/* Take an extra page reference for the chunk we handed out. */
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	/* Refill in batches of 16; the doorbell below publishes
	 * lbq_prod_idx in these 16-entry steps.
	 */
	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue.
*/
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	/* Refill in batches of 16, allocating and mapping an skb for any
	 * descriptor whose buffer was consumed (p.skb == NULL).
	 */
	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

/* Refill both buffer queues for one rx ring. */
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.
Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen),
				       PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			/* Subsequent frag descriptors are written into
			 * the OAL instead of the IOCB.
			 */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring.
*/ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct sk_buff *skb; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; napi->dev = qdev->ndev; skb = napi_get_frags(napi); if (!skb) { netif_err(qdev, drv, qdev->ndev, "Couldn't get an skb, exiting.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; } prefetch(lbq_desc->p.pg_chunk.va); __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset, length); skb->len += length; skb->data_len += length; skb->truesize += length; skb_shinfo(skb)->nr_frags++; rx_ring->rx_packets++; rx_ring->rx_bytes += length; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); napi_gro_frags(napi); } /* Process an inbound completion from an rx ring. */ static void ql_process_mac_rx_page(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; void *addr; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; skb = netdev_alloc_skb(ndev, length); if (!skb) { netif_err(qdev, drv, qdev->ndev, "Couldn't get an skb, need to unwind!.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; } addr = lbq_desc->p.pg_chunk.va; prefetch(addr); /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); rx_ring->rx_errors++; goto err_out; } /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. 
*/ if (skb->len > ndev->mtu + ETH_HLEN) { netif_err(qdev, drv, qdev->ndev, "Segment too small, dropping.\n"); rx_ring->rx_dropped++; goto err_out; } memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset+ETH_HLEN, length-ETH_HLEN); skb->len += length-ETH_HLEN; skb->data_len += length-ETH_HLEN; skb->truesize += length-ETH_HLEN; rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) ((u8 *)addr + ETH_HLEN); if (!(iph->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "UDP checksum done!\n"); } } } skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(napi, skb); else netif_receive_skb(skb); return; err_out: dev_kfree_skb_any(skb); put_page(lbq_desc->p.pg_chunk.page); } /* Process an inbound completion from an rx ring. 
*/ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; struct sk_buff *new_skb = NULL; struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); skb = sbq_desc->p.skb; /* Allocate new_skb and copy */ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); if (new_skb == NULL) { netif_err(qdev, probe, qdev->ndev, "No skb available, drop the packet.\n"); rx_ring->rx_dropped++; return; } skb_reserve(new_skb, NET_IP_ALIGN); memcpy(skb_put(new_skb, length), skb->data, length); skb = new_skb; /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); rx_ring->rx_errors++; return; } /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); dev_kfree_skb_any(skb); return; } /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ if (skb->len > ndev->mtu + ETH_HLEN) { dev_kfree_skb_any(skb); rx_ring->rx_dropped++; return; } prefetch(skb->data); skb->dev = ndev; if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_HASH ? "Hash" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_REG ? "Registered" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); } if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Promiscuous Packet.\n"); rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* If rx checksum is on, and there are no * csum or frame errors. 
*/ if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "UDP checksum done!\n"); } } } skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else netif_receive_skb(skb); } static void ql_realign_skb(struct sk_buff *skb, int len) { void *temp_addr = skb->data; /* Undo the skb_reserve(skb,32) we did before * giving to hardware, and realign data on * a 2-byte boundary. */ skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; skb_copy_to_linear_data(skb, temp_addr, (unsigned int)len); } /* * This function builds an skb for the given inbound * completion. It will be rewritten for readability in the near * future, but for not it works well. */ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp) { struct bq_desc *lbq_desc; struct bq_desc *sbq_desc; struct sk_buff *skb = NULL; u32 length = le32_to_cpu(ib_mac_rsp->data_len); u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); /* * Handle the header buffer if present. */ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Header of %d bytes in small buffer.\n", hdr_len); /* * Headers fit nicely into a small buffer. 
*/
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				dma_unmap_addr(sbq_desc, mapaddr),
				dma_unmap_len(sbq_desc, maplen),
				PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr
						    (sbq_desc, mapaddr),
						    dma_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr
						       (sbq_desc,
							mapaddr),
						       dma_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			/* NOTE(review): 'length -= length' just zeroes
			 * 'length'; it has no other effect here since this
			 * branch returns via the common exit below.
			 * Presumably a leftover from an earlier loop —
			 * verify before removing.
			 */
			length -= length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is an non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
*/ if (skb->len > ndev->mtu + ETH_HLEN) { dev_kfree_skb_any(skb); rx_ring->rx_dropped++; return; } /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); dev_kfree_skb_any(skb); return; } prefetch(skb->data); skb->dev = ndev; if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_HASH ? "Hash" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_REG ? "Registered" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); rx_ring->rx_multicast++; } if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Promiscuous Packet.\n"); } skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* If rx checksum is on, and there are no * csum or frame errors. */ if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); } } } rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb_record_rx_queue(skb, rx_ring->cq_id); if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else netif_receive_skb(skb); } /* Process an inbound completion from an rx ring. 
 */
/* Top-level inbound MAC completion dispatcher.  Decodes the IOCB flag
 * bits to decide how the hardware laid out the frame (header split,
 * single small buffer, page chunk, or multi-buffer chain) and routes
 * the completion to the matching handler.  Returns the frame's data
 * length as reported by the IOCB.
 *
 * Branch order matters: the checksummed-TCP page-chunk (GRO) test must
 * precede the generic page-chunk test since both match IB_MAC_IOCB_RSP_DL.
 */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	/* 0xffff means "no VLAN tag present" for the downstream handlers. */
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring.
 */
/* Transmit-completion handler.  Looks up the tx ring and descriptor the
 * IOCB refers to, unmaps the DMA segments, accounts the transmitted
 * bytes/packets, frees the skb, decodes any hardware error flags into
 * warnings, and finally returns the descriptor slot to the ring by
 * bumping tx_count.
 */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	/* Account stats before the skb is freed below. */
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	/* Decode hardware-reported transmit anomalies.  Note that
	 * OB_MAC_IOCB_RSP_P is tested in the mask but has no dedicated
	 * message of its own.
	 */
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	/* Release the slot back to the ring; the send path decrements. */
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor.
 */
/* Take the link down and schedule the MPI (firmware) reset worker. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

/* Fatal ASIC error path: drop the link, mask interrupts, flag the
 * adapter state bits for the recovery worker, then schedule the ASIC
 * reset work.  The flag manipulation must happen before the work is
 * queued so the worker observes a consistent state.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

/* Decode an asynchronous-event IOCB from the chip and kick off the
 * matching recovery action: firmware reset for management-processor
 * faults, ASIC reset for everything else (including unknown events).
 */
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
					"anonymous buffers from rx_ring %d.\n",
					ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

/* Drain transmit completions from an outbound completion (rx) ring.
 * Returns the number of completions processed.
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue.
*/ while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; rmb(); switch (net_rsp->opcode) { case OPCODE_OB_MAC_TSO_IOCB: case OPCODE_OB_MAC_IOCB: ql_process_mac_tx_intr(qdev, net_rsp); break; default: netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Hit default case, not handled! dropping the packet, opcode = %x.\n", net_rsp->opcode); } count++; ql_update_cq(rx_ring); prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); } if (!net_rsp) return 0; ql_write_cq_idx(rx_ring); tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { if (atomic_read(&tx_ring->queue_stopped) && (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) /* * The queue got stopped because the tx_ring was full. * Wake it up, because it's now at least 25% empty. */ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); } return count; } static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) { struct ql_adapter *qdev = rx_ring->qdev; u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); struct ql_net_rsp_iocb *net_rsp; int count = 0; /* While there are entries in the completion queue. */ while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = rx_ring->curr_entry; rmb(); switch (net_rsp->opcode) { case OPCODE_IB_MAC_IOCB: ql_process_mac_rx_intr(qdev, rx_ring, (struct ib_mac_iocb_rsp *) net_rsp); break; case OPCODE_IB_AE_IOCB: ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) net_rsp); break; default: netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Hit default case, not handled! 
dropping the packet, opcode = %x.\n", net_rsp->opcode); break; } count++; ql_update_cq(rx_ring); prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); if (count == budget) break; } ql_update_buffer_queues(qdev, rx_ring); ql_write_cq_idx(rx_ring); return count; } static int ql_napi_poll_msix(struct napi_struct *napi, int budget) { struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); struct ql_adapter *qdev = rx_ring->qdev; struct rx_ring *trx_ring; int i, work_done = 0; struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); /* Service the TX rings first. They start * right after the RSS rings. */ for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { trx_ring = &qdev->rx_ring[i]; /* If this TX completion ring belongs to this vector and * it's not empty then service it. */ if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != trx_ring->cnsmr_idx)) { netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, "%s: Servicing TX completion ring %d.\n", __func__, trx_ring->cq_id); ql_clean_outbound_rx_ring(trx_ring); } } /* * Now service the RSS ring if it's active. 
*/ if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, "%s: Servicing RX completion ring %d.\n", __func__, rx_ring->cq_id); work_done = ql_clean_inbound_rx_ring(rx_ring, budget); } if (work_done < budget) { napi_complete(napi); ql_enable_completion_interrupt(qdev, rx_ring->irq); } return work_done; } static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) { struct ql_adapter *qdev = netdev_priv(ndev); if (features & NETIF_F_HW_VLAN_RX) { ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | NIC_RCV_CFG_VLAN_MATCH_AND_NON); } else { ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); } } static netdev_features_t qlge_fix_features(struct net_device *ndev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_RX) features |= NETIF_F_HW_VLAN_TX; else features &= ~NETIF_F_HW_VLAN_TX; return features; } static int qlge_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; if (changed & NETIF_F_HW_VLAN_RX) qlge_vlan_mode(ndev, features); return 0; } static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = MAC_ADDR_E; int err; err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid); if (err) netif_err(qdev, ifup, qdev->ndev, "Failed to init vlan address.\n"); return err; } static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; err = __qlge_vlan_rx_add_vid(qdev, vid); set_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); return err; } static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) { u32 enable_bit = 0; int err; err = 
ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid); if (err) netif_err(qdev, ifup, qdev->ndev, "Failed to clear vlan address.\n"); return err; } static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) { struct ql_adapter *qdev = netdev_priv(ndev); int status; int err; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; err = __qlge_vlan_rx_kill_vid(qdev, vid); clear_bit(vid, qdev->active_vlans); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); return err; } static void qlge_restore_vlan(struct ql_adapter *qdev) { int status; u16 vid; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return; for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) __qlge_vlan_rx_add_vid(qdev, vid); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) { struct rx_ring *rx_ring = dev_id; napi_schedule(&rx_ring->napi); return IRQ_HANDLED; } /* This handles a fatal error, MPI activity, and the default * rx_ring in an MSI-X multiple vector environment. * In MSI/Legacy environment it also process the rest of * the rx_rings. */ static irqreturn_t qlge_isr(int irq, void *dev_id) { struct rx_ring *rx_ring = dev_id; struct ql_adapter *qdev = rx_ring->qdev; struct intr_context *intr_context = &qdev->intr_context[0]; u32 var; int work_done = 0; spin_lock(&qdev->hw_lock); if (atomic_read(&qdev->intr_context[0].irq_cnt)) { netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, "Shared Interrupt, Not ours!\n"); spin_unlock(&qdev->hw_lock); return IRQ_NONE; } spin_unlock(&qdev->hw_lock); var = ql_disable_completion_interrupt(qdev, intr_context->intr); /* * Check for fatal error. */ if (var & STS_FE) { ql_queue_asic_error(qdev); netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); var = ql_read32(qdev, ERR_STS); netdev_err(qdev->ndev, "Resetting chip. 
" "Error Status Register = 0x%x\n", var); return IRQ_HANDLED; } /* * Check MPI processor activity. */ if ((var & STS_PI) && (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { /* * We've got an async event or mailbox completion. * Handle it and clear the source of the interrupt. */ netif_err(qdev, intr, qdev->ndev, "Got MPI processor interrupt.\n"); ql_disable_completion_interrupt(qdev, intr_context->intr); ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); queue_delayed_work_on(smp_processor_id(), qdev->workqueue, &qdev->mpi_work, 0); work_done++; } /* * Get the bit-mask that shows the active queues for this * pass. Compare it to the queues that this irq services * and call napi if there's a match. */ var = ql_read32(qdev, ISR1); if (var & intr_context->irq_mask) { netif_info(qdev, intr, qdev->ndev, "Waking handler for rx_ring[0].\n"); ql_disable_completion_interrupt(qdev, intr_context->intr); napi_schedule(&rx_ring->napi); work_done++; } ql_enable_completion_interrupt(qdev, intr_context->intr); return work_done ? 
IRQ_HANDLED : IRQ_NONE; } static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) { if (skb_is_gso(skb)) { int err; if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); mac_iocb_ptr->total_hdrs_len = cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); mac_iocb_ptr->net_trans_offset = cpu_to_le16(skb_network_offset(skb) | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; if (likely(skb->protocol == htons(ETH_P_IP))) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); } else if (skb->protocol == htons(ETH_P_IPV6)) { mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } return 1; } return 0; } static void ql_hw_csum_setup(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) { int len; struct iphdr *iph = ip_hdr(skb); __sum16 *check; mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); mac_iocb_ptr->net_trans_offset = cpu_to_le16(skb_network_offset(skb) | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; len = (ntohs(iph->tot_len) - (iph->ihl << 2)); if (likely(iph->protocol == IPPROTO_TCP)) { check = &(tcp_hdr(skb)->check); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; mac_iocb_ptr->total_hdrs_len = cpu_to_le16(skb_transport_offset(skb) + (tcp_hdr(skb)->doff << 2)); } else { check = &(udp_hdr(skb)->check); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; mac_iocb_ptr->total_hdrs_len = 
cpu_to_le16(skb_transport_offset(skb) + sizeof(struct udphdr)); } *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, len, iph->protocol, 0); } static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) { struct tx_ring_desc *tx_ring_desc; struct ob_mac_iocb_req *mac_iocb_ptr; struct ql_adapter *qdev = netdev_priv(ndev); int tso; struct tx_ring *tx_ring; u32 tx_ring_idx = (u32) skb->queue_mapping; tx_ring = &qdev->tx_ring[tx_ring_idx]; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { netif_info(qdev, tx_queued, qdev->ndev, "%s: shutting down tx queue %d du to lack of resources.\n", __func__, tx_ring_idx); netif_stop_subqueue(ndev, tx_ring->wq_id); atomic_inc(&tx_ring->queue_stopped); tx_ring->tx_errors++; return NETDEV_TX_BUSY; } tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; mac_iocb_ptr = tx_ring_desc->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; mac_iocb_ptr->tid = tx_ring_desc->index; /* We use the upper 32-bits to store the tx queue for this IO. * When we get the completion we can use it to establish the context. 
*/ mac_iocb_ptr->txq_idx = tx_ring_idx; tx_ring_desc->skb = skb; mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); if (vlan_tx_tag_present(skb)) { netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); } tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { ql_hw_csum_setup(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); } if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) { netif_err(qdev, tx_queued, qdev->ndev, "Could not map the segments.\n"); tx_ring->tx_errors++; return NETDEV_TX_BUSY; } QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); tx_ring->prod_idx++; if (tx_ring->prod_idx == tx_ring->wq_len) tx_ring->prod_idx = 0; wmb(); ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "tx queued, slot %d, len %d\n", tx_ring->prod_idx, skb->len); atomic_dec(&tx_ring->tx_count); return NETDEV_TX_OK; } static void ql_free_shadow_space(struct ql_adapter *qdev) { if (qdev->rx_ring_shadow_reg_area) { pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->rx_ring_shadow_reg_area, qdev->rx_ring_shadow_reg_dma); qdev->rx_ring_shadow_reg_area = NULL; } if (qdev->tx_ring_shadow_reg_area) { pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->tx_ring_shadow_reg_area, qdev->tx_ring_shadow_reg_dma); qdev->tx_ring_shadow_reg_area = NULL; } } static int ql_alloc_shadow_space(struct ql_adapter *qdev) { qdev->rx_ring_shadow_reg_area = pci_alloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); if (qdev->rx_ring_shadow_reg_area == NULL) { netif_err(qdev, ifup, qdev->ndev, "Allocation of RX shadow space failed.\n"); return -ENOMEM; } memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); qdev->tx_ring_shadow_reg_area = 
pci_alloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->tx_ring_shadow_reg_dma); if (qdev->tx_ring_shadow_reg_area == NULL) { netif_err(qdev, ifup, qdev->ndev, "Allocation of TX shadow space failed.\n"); goto err_wqp_sh_area; } memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); return 0; err_wqp_sh_area: pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->rx_ring_shadow_reg_area, qdev->rx_ring_shadow_reg_dma); return -ENOMEM; } static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) { struct tx_ring_desc *tx_ring_desc; int i; struct ob_mac_iocb_req *mac_iocb_ptr; mac_iocb_ptr = tx_ring->wq_base; tx_ring_desc = tx_ring->q; for (i = 0; i < tx_ring->wq_len; i++) { tx_ring_desc->index = i; tx_ring_desc->skb = NULL; tx_ring_desc->queue_entry = mac_iocb_ptr; mac_iocb_ptr++; tx_ring_desc++; } atomic_set(&tx_ring->tx_count, tx_ring->wq_len); atomic_set(&tx_ring->queue_stopped, 0); } static void ql_free_tx_resources(struct ql_adapter *qdev, struct tx_ring *tx_ring) { if (tx_ring->wq_base) { pci_free_consistent(qdev->pdev, tx_ring->wq_size, tx_ring->wq_base, tx_ring->wq_base_dma); tx_ring->wq_base = NULL; } kfree(tx_ring->q); tx_ring->q = NULL; } static int ql_alloc_tx_resources(struct ql_adapter *qdev, struct tx_ring *tx_ring) { tx_ring->wq_base = pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, &tx_ring->wq_base_dma); if ((tx_ring->wq_base == NULL) || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); return -ENOMEM; } tx_ring->q = kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); if (tx_ring->q == NULL) goto err; return 0; err: pci_free_consistent(qdev->pdev, tx_ring->wq_size, tx_ring->wq_base, tx_ring->wq_base_dma); return -ENOMEM; } static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { struct bq_desc *lbq_desc; uint32_t curr_idx, clean_idx; curr_idx = rx_ring->lbq_curr_idx; clean_idx = rx_ring->lbq_clean_idx; while (curr_idx != clean_idx) { lbq_desc = 
&rx_ring->lbq[curr_idx]; if (lbq_desc->p.pg_chunk.last_flag) { pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map, ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); lbq_desc->p.pg_chunk.last_flag = 0; } put_page(lbq_desc->p.pg_chunk.page); lbq_desc->p.pg_chunk.page = NULL; if (++curr_idx == rx_ring->lbq_len) curr_idx = 0; } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) { int i; struct bq_desc *sbq_desc; for (i = 0; i < rx_ring->sbq_len; i++) { sbq_desc = &rx_ring->sbq[i]; if (sbq_desc == NULL) { netif_err(qdev, ifup, qdev->ndev, "sbq_desc %d is NULL.\n", i); return; } if (sbq_desc->p.skb) { pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); dev_kfree_skb(sbq_desc->p.skb); sbq_desc->p.skb = NULL; } } } /* Free all large and small rx buffers associated * with the completion queues for this device. */ static void ql_free_rx_buffers(struct ql_adapter *qdev) { int i; struct rx_ring *rx_ring; for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; if (rx_ring->lbq) ql_free_lbq_buffers(qdev, rx_ring); if (rx_ring->sbq) ql_free_sbq_buffers(qdev, rx_ring); } } static void ql_alloc_rx_buffers(struct ql_adapter *qdev) { struct rx_ring *rx_ring; int i; for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; if (rx_ring->type != TX_Q) ql_update_buffer_queues(qdev, rx_ring); } } static void ql_init_lbq_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) { int i; struct bq_desc *lbq_desc; __le64 *bq = rx_ring->lbq_base; memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); for (i = 0; i < rx_ring->lbq_len; i++) { lbq_desc = &rx_ring->lbq[i]; memset(lbq_desc, 0, sizeof(*lbq_desc)); lbq_desc->index = i; lbq_desc->addr = bq; bq++; } } static void ql_init_sbq_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) { int i; struct bq_desc *sbq_desc; __le64 *bq = rx_ring->sbq_base; memset(rx_ring->sbq, 0, rx_ring->sbq_len * 
sizeof(struct bq_desc)); for (i = 0; i < rx_ring->sbq_len; i++) { sbq_desc = &rx_ring->sbq[i]; memset(sbq_desc, 0, sizeof(*sbq_desc)); sbq_desc->index = i; sbq_desc->addr = bq; bq++; } } static void ql_free_rx_resources(struct ql_adapter *qdev, struct rx_ring *rx_ring) { /* Free the small buffer queue. */ if (rx_ring->sbq_base) { pci_free_consistent(qdev->pdev, rx_ring->sbq_size, rx_ring->sbq_base, rx_ring->sbq_base_dma); rx_ring->sbq_base = NULL; } /* Free the small buffer queue control blocks. */ kfree(rx_ring->sbq); rx_ring->sbq = NULL; /* Free the large buffer queue. */ if (rx_ring->lbq_base) { pci_free_consistent(qdev->pdev, rx_ring->lbq_size, rx_ring->lbq_base, rx_ring->lbq_base_dma); rx_ring->lbq_base = NULL; } /* Free the large buffer queue control blocks. */ kfree(rx_ring->lbq); rx_ring->lbq = NULL; /* Free the rx queue. */ if (rx_ring->cq_base) { pci_free_consistent(qdev->pdev, rx_ring->cq_size, rx_ring->cq_base, rx_ring->cq_base_dma); rx_ring->cq_base = NULL; } } /* Allocate queues and buffers for this completions queue based * on the values in the parameter structure. */ static int ql_alloc_rx_resources(struct ql_adapter *qdev, struct rx_ring *rx_ring) { /* * Allocate the completion queue for this rx_ring. */ rx_ring->cq_base = pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, &rx_ring->cq_base_dma); if (rx_ring->cq_base == NULL) { netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); return -ENOMEM; } if (rx_ring->sbq_len) { /* * Allocate small buffer queue. */ rx_ring->sbq_base = pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, &rx_ring->sbq_base_dma); if (rx_ring->sbq_base == NULL) { netif_err(qdev, ifup, qdev->ndev, "Small buffer queue allocation failed.\n"); goto err_mem; } /* * Allocate small buffer queue control blocks. 
*/ rx_ring->sbq = kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), GFP_KERNEL); if (rx_ring->sbq == NULL) { netif_err(qdev, ifup, qdev->ndev, "Small buffer queue control block allocation failed.\n"); goto err_mem; } ql_init_sbq_ring(qdev, rx_ring); } if (rx_ring->lbq_len) { /* * Allocate large buffer queue. */ rx_ring->lbq_base = pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, &rx_ring->lbq_base_dma); if (rx_ring->lbq_base == NULL) { netif_err(qdev, ifup, qdev->ndev, "Large buffer queue allocation failed.\n"); goto err_mem; } /* * Allocate large buffer queue control blocks. */ rx_ring->lbq = kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), GFP_KERNEL); if (rx_ring->lbq == NULL) { netif_err(qdev, ifup, qdev->ndev, "Large buffer queue control block allocation failed.\n"); goto err_mem; } ql_init_lbq_ring(qdev, rx_ring); } return 0; err_mem: ql_free_rx_resources(qdev, rx_ring); return -ENOMEM; } static void ql_tx_ring_clean(struct ql_adapter *qdev) { struct tx_ring *tx_ring; struct tx_ring_desc *tx_ring_desc; int i, j; /* * Loop through all queues and free * any resources. */ for (j = 0; j < qdev->tx_ring_count; j++) { tx_ring = &qdev->tx_ring[j]; for (i = 0; i < tx_ring->wq_len; i++) { tx_ring_desc = &tx_ring->q[i]; if (tx_ring_desc && tx_ring_desc->skb) { netif_err(qdev, ifdown, qdev->ndev, "Freeing lost SKB %p, from queue %d, index %d.\n", tx_ring_desc->skb, j, tx_ring_desc->index); ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); dev_kfree_skb(tx_ring_desc->skb); tx_ring_desc->skb = NULL; } } } } static void ql_free_mem_resources(struct ql_adapter *qdev) { int i; for (i = 0; i < qdev->tx_ring_count; i++) ql_free_tx_resources(qdev, &qdev->tx_ring[i]); for (i = 0; i < qdev->rx_ring_count; i++) ql_free_rx_resources(qdev, &qdev->rx_ring[i]); ql_free_shadow_space(qdev); } static int ql_alloc_mem_resources(struct ql_adapter *qdev) { int i; /* Allocate space for our shadow registers and such. 
*/ if (ql_alloc_shadow_space(qdev)) return -ENOMEM; for (i = 0; i < qdev->rx_ring_count; i++) { if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { netif_err(qdev, ifup, qdev->ndev, "RX resource allocation failed.\n"); goto err_mem; } } /* Allocate tx queue resources */ for (i = 0; i < qdev->tx_ring_count; i++) { if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { netif_err(qdev, ifup, qdev->ndev, "TX resource allocation failed.\n"); goto err_mem; } } return 0; err_mem: ql_free_mem_resources(qdev); return -ENOMEM; } /* Set up the rx ring control block and pass it to the chip. * The control block is defined as * "Completion Queue Initialization Control Block", or cqicb. */ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) { struct cqicb *cqicb = &rx_ring->cqicb; void *shadow_reg = qdev->rx_ring_shadow_reg_area + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); void __iomem *doorbell_area = qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); int err = 0; u16 bq_len; u64 tmp; __le64 *base_indirect_ptr; int page_entries; /* Set up the shadow registers for this ring. 
*/ rx_ring->prod_idx_sh_reg = shadow_reg; rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; *rx_ring->prod_idx_sh_reg = 0; shadow_reg += sizeof(u64); shadow_reg_dma += sizeof(u64); rx_ring->lbq_base_indirect = shadow_reg; rx_ring->lbq_base_indirect_dma = shadow_reg_dma; shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); rx_ring->sbq_base_indirect = shadow_reg; rx_ring->sbq_base_indirect_dma = shadow_reg_dma; /* PCI doorbell mem area + 0x00 for consumer index register */ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; rx_ring->cnsmr_idx = 0; rx_ring->curr_entry = rx_ring->cq_base; /* PCI doorbell mem area + 0x04 for valid register */ rx_ring->valid_db_reg = doorbell_area + 0x04; /* PCI doorbell mem area + 0x18 for large buffer consumer */ rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); /* PCI doorbell mem area + 0x1c */ rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); memset((void *)cqicb, 0, sizeof(struct cqicb)); cqicb->msix_vect = rx_ring->irq; bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); /* * Set up the control block load flags. */ cqicb->flags = FLAGS_LC | /* Load queue base address */ FLAGS_LV | /* Load MSI-X vector */ FLAGS_LI; /* Load irq delay values */ if (rx_ring->lbq_len) { cqicb->flags |= FLAGS_LL; /* Load lbq values */ tmp = (u64)rx_ring->lbq_base_dma; base_indirect_ptr = rx_ring->lbq_base_indirect; page_entries = 0; do { *base_indirect_ptr = cpu_to_le64(tmp); tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq_base_indirect_dma); bq_len = (rx_ring->lbq_buf_size == 65536) ? 
0 : (u16) rx_ring->lbq_buf_size; cqicb->lbq_buf_size = cpu_to_le16(bq_len); bq_len = (rx_ring->lbq_len == 65536) ? 0 : (u16) rx_ring->lbq_len; cqicb->lbq_len = cpu_to_le16(bq_len); rx_ring->lbq_prod_idx = 0; rx_ring->lbq_curr_idx = 0; rx_ring->lbq_clean_idx = 0; rx_ring->lbq_free_cnt = rx_ring->lbq_len; } if (rx_ring->sbq_len) { cqicb->flags |= FLAGS_LS; /* Load sbq values */ tmp = (u64)rx_ring->sbq_base_dma; base_indirect_ptr = rx_ring->sbq_base_indirect; page_entries = 0; do { *base_indirect_ptr = cpu_to_le64(tmp); tmp += DB_PAGE_SIZE; base_indirect_ptr++; page_entries++; } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq_base_indirect_dma); cqicb->sbq_buf_size = cpu_to_le16((u16)(rx_ring->sbq_buf_size)); bq_len = (rx_ring->sbq_len == 65536) ? 0 : (u16) rx_ring->sbq_len; cqicb->sbq_len = cpu_to_le16(bq_len); rx_ring->sbq_prod_idx = 0; rx_ring->sbq_curr_idx = 0; rx_ring->sbq_clean_idx = 0; rx_ring->sbq_free_cnt = rx_ring->sbq_len; } switch (rx_ring->type) { case TX_Q: cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); break; case RX_Q: /* Inbound completion handling rx_rings run in * separate NAPI contexts. 
*/ netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, 64); cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); break; default: netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Invalid rx_ring->type = %d.\n", rx_ring->type); } err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), CFG_LCQ, rx_ring->cq_id); if (err) { netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); return err; } return err; } static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) { struct wqicb *wqicb = (struct wqicb *)tx_ring; void __iomem *doorbell_area = qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); void *shadow_reg = qdev->tx_ring_shadow_reg_area + (tx_ring->wq_id * sizeof(u64)); u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + (tx_ring->wq_id * sizeof(u64)); int err = 0; /* * Assign doorbell registers for this tx_ring. */ /* TX PCI doorbell mem area for tx producer index */ tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; tx_ring->prod_idx = 0; /* TX PCI doorbell mem area + 0x04 */ tx_ring->valid_db_reg = doorbell_area + 0x04; /* * Assign shadow registers for this tx_ring. 
*/ tx_ring->cnsmr_idx_sh_reg = shadow_reg; tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); wqicb->flags = cpu_to_le16(Q_FLAGS_LC | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); wqicb->rid = 0; wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); ql_init_tx_ring(qdev, tx_ring); err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, (u16) tx_ring->wq_id); if (err) { netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); return err; } return err; } static void ql_disable_msix(struct ql_adapter *qdev) { if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { pci_disable_msix(qdev->pdev); clear_bit(QL_MSIX_ENABLED, &qdev->flags); kfree(qdev->msi_x_entry); qdev->msi_x_entry = NULL; } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { pci_disable_msi(qdev->pdev); clear_bit(QL_MSI_ENABLED, &qdev->flags); } } /* We start by trying to get the number of vectors * stored in qdev->intr_count. If we don't get that * many then we reduce the count and try again. */ static void ql_enable_msix(struct ql_adapter *qdev) { int i, err; /* Get the MSIX vectors. */ if (qlge_irq_type == MSIX_IRQ) { /* Try to alloc space for the msix struct, * if it fails then go to MSI/legacy. */ qdev->msi_x_entry = kcalloc(qdev->intr_count, sizeof(struct msix_entry), GFP_KERNEL); if (!qdev->msi_x_entry) { qlge_irq_type = MSI_IRQ; goto msi; } for (i = 0; i < qdev->intr_count; i++) qdev->msi_x_entry[i].entry = i; /* Loop to get our vectors. We start with * what we want and settle for what we get. 
*/ do { err = pci_enable_msix(qdev->pdev, qdev->msi_x_entry, qdev->intr_count); if (err > 0) qdev->intr_count = err; } while (err > 0); if (err < 0) { kfree(qdev->msi_x_entry); qdev->msi_x_entry = NULL; netif_warn(qdev, ifup, qdev->ndev, "MSI-X Enable failed, trying MSI.\n"); qdev->intr_count = 1; qlge_irq_type = MSI_IRQ; } else if (err == 0) { set_bit(QL_MSIX_ENABLED, &qdev->flags); netif_info(qdev, ifup, qdev->ndev, "MSI-X Enabled, got %d vectors.\n", qdev->intr_count); return; } } msi: qdev->intr_count = 1; if (qlge_irq_type == MSI_IRQ) { if (!pci_enable_msi(qdev->pdev)) { set_bit(QL_MSI_ENABLED, &qdev->flags); netif_info(qdev, ifup, qdev->ndev, "Running with MSI interrupts.\n"); return; } } qlge_irq_type = LEG_IRQ; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Running with legacy interrupts.\n"); } /* Each vector services 1 RSS ring and and 1 or more * TX completion rings. This function loops through * the TX completion rings and assigns the vector that * will service it. An example would be if there are * 2 vectors (so 2 RSS rings) and 8 TX completion rings. * This would mean that vector 0 would service RSS ring 0 * and TX completion rings 0,1,2 and 3. Vector 1 would * service RSS ring 1 and TX completion rings 4,5,6 and 7. */ static void ql_set_tx_vect(struct ql_adapter *qdev) { int i, j, vect; u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { /* Assign irq vectors to TX rx_rings.*/ for (vect = 0, j = 0, i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { if (j == tx_rings_per_vector) { vect++; j = 0; } qdev->rx_ring[i].irq = vect; j++; } } else { /* For single vector all rings have an irq * of zero. */ for (i = 0; i < qdev->rx_ring_count; i++) qdev->rx_ring[i].irq = 0; } } /* Set the interrupt mask for this vector. Each vector * will service 1 RSS ring and 1 or more TX completion * rings. This function sets up a bit mask per vector * that indicates which rings it services. 
*/ static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) { int j, vect = ctx->intr; u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { /* Add the RSS ring serviced by this vector * to the mask. */ ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); /* Add the TX ring(s) serviced by this vector * to the mask. */ for (j = 0; j < tx_rings_per_vector; j++) { ctx->irq_mask |= (1 << qdev->rx_ring[qdev->rss_ring_count + (vect * tx_rings_per_vector) + j].cq_id); } } else { /* For single vector we just shift each queue's * ID into the mask. */ for (j = 0; j < qdev->rx_ring_count; j++) ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); } } /* * Here we build the intr_context structures based on * our rx_ring count and intr vector count. * The intr_context structure is used to hook each vector * to possibly different handlers. */ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) { int i = 0; struct intr_context *intr_context = &qdev->intr_context[0]; if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { /* Each rx_ring has it's * own intr_context since we have separate * vectors for each queue. */ for (i = 0; i < qdev->intr_count; i++, intr_context++) { qdev->rx_ring[i].irq = i; intr_context->intr = i; intr_context->qdev = qdev; /* Set up this vector's bit-mask that indicates * which queues it services. */ ql_set_irq_mask(qdev, intr_context); /* * We set up each vectors enable/disable/read bits so * there's no bit/mask calculations in the critical path. 
*/ intr_context->intr_en_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | i; intr_context->intr_dis_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | i; intr_context->intr_read_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | i; if (i == 0) { /* The first vector/queue handles * broadcast/multicast, fatal errors, * and firmware events. This in addition * to normal inbound NAPI processing. */ intr_context->handler = qlge_isr; sprintf(intr_context->name, "%s-rx-%d", qdev->ndev->name, i); } else { /* * Inbound queues handle unicast frames only. */ intr_context->handler = qlge_msix_rx_isr; sprintf(intr_context->name, "%s-rx-%d", qdev->ndev->name, i); } } } else { /* * All rx_rings use the same intr_context since * there is only one vector. */ intr_context->intr = 0; intr_context->qdev = qdev; /* * We set up each vectors enable/disable/read bits so * there's no bit/mask calculations in the critical path. */ intr_context->intr_en_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; intr_context->intr_dis_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_DISABLE; intr_context->intr_read_mask = INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; /* * Single interrupt means one handler for all rings. */ intr_context->handler = qlge_isr; sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); /* Set up this vector's bit-mask that indicates * which queues it services. In this case there is * a single vector so it will service all RSS and * TX completion rings. */ ql_set_irq_mask(qdev, intr_context); } /* Tell the TX completion rings which MSIx vector * they will be using. 
*/ ql_set_tx_vect(qdev); } static void ql_free_irq(struct ql_adapter *qdev) { int i; struct intr_context *intr_context = &qdev->intr_context[0]; for (i = 0; i < qdev->intr_count; i++, intr_context++) { if (intr_context->hooked) { if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { free_irq(qdev->msi_x_entry[i].vector, &qdev->rx_ring[i]); } else { free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); } } } ql_disable_msix(qdev); } static int ql_request_irq(struct ql_adapter *qdev) { int i; int status = 0; struct pci_dev *pdev = qdev->pdev; struct intr_context *intr_context = &qdev->intr_context[0]; ql_resolve_queues_to_irqs(qdev); for (i = 0; i < qdev->intr_count; i++, intr_context++) { atomic_set(&intr_context->irq_cnt, 0); if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { status = request_irq(qdev->msi_x_entry[i].vector, intr_context->handler, 0, intr_context->name, &qdev->rx_ring[i]); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed request for MSIX interrupt %d.\n", i); goto err_irq; } } else { netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "trying msi or legacy interrupts.\n"); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: irq = %d.\n", __func__, pdev->irq); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: context->name = %s.\n", __func__, intr_context->name); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: dev_id = 0x%p.\n", __func__, &qdev->rx_ring[0]); status = request_irq(pdev->irq, qlge_isr, test_bit(QL_MSI_ENABLED, &qdev-> flags) ? 0 : IRQF_SHARED, intr_context->name, &qdev->rx_ring[0]); if (status) goto err_irq; netif_err(qdev, ifup, qdev->ndev, "Hooked intr %d, queue type %s, with name %s.\n", i, qdev->rx_ring[0].type == DEFAULT_Q ? "DEFAULT_Q" : qdev->rx_ring[0].type == TX_Q ? "TX_Q" : qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", intr_context->name); } intr_context->hooked = 1; } return status; err_irq: netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); ql_free_irq(qdev); return status; } static int ql_start_rss(struct ql_adapter *qdev) { static const u8 init_hash_seed[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; struct ricb *ricb = &qdev->ricb; int status = 0; int i; u8 *hash_id = (u8 *) ricb->hash_cq_id; memset((void *)ricb, 0, sizeof(*ricb)); ricb->base_cq = RSS_L4K; ricb->flags = (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); ricb->mask = cpu_to_le16((u16)(0x3ff)); /* * Fill out the Indirection Table. */ for (i = 0; i < 1024; i++) hash_id[i] = (i & (qdev->rss_ring_count - 1)); memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); return status; } return status; } static int ql_clear_routing_entries(struct ql_adapter *qdev) { int i, status = 0; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; /* Clear all the entries in the routing table. */ for (i = 0; i < 16; i++) { status = ql_set_routing_reg(qdev, i, 0, 0); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register for CAM packets.\n"); break; } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Initialize the frame-to-queue routing. */ static int ql_route_initialize(struct ql_adapter *qdev) { int status = 0; /* Clear all the entries in the routing table. 
*/
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Route frames with IP checksum errors. */
	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
						RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			"Failed to init routing register "
			"for IP CSUM error packets.\n");
		goto exit;
	}
	/* Route frames with TCP/UDP checksum errors. */
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
						RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			"Failed to init routing register "
			"for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	/* Route broadcast frames. */
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}
	/* Route unicast frames that hit our CAM (MAC address match). */
	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				       RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Program the CAM MAC address and the frame-to-queue routing table.
 * Called when the link state or MAC address may have changed.
 * Returns 0 on success or the first error from the helpers.
 */
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

/* Bring the chip to an operational state: error halting, queue and
 * VLAN defaults, interrupt masks, buffer/completion queues, RSS and
 * routing.  Returns 0 on success.
 */
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
*/ value = SYS_EFE | SYS_FAE; mask = value << 16; ql_write32(qdev, SYS, mask | value); /* Set the default queue, and VLAN behavior. */ value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); ql_write32(qdev, NIC_RCV_CFG, (mask | value)); /* Set the MPI interrupt to enabled. */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); /* Enable the function, set pagesize, enable error checking. */ value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | FSC_EC | FSC_VM_PAGE_4K; value |= SPLT_SETTING; /* Set/clear header splitting. */ mask = FSC_VM_PAGESIZE_MASK | FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); ql_write32(qdev, FSC, mask | value); ql_write32(qdev, SPLT_HDR, SPLT_LEN); /* Set RX packet routing to use port/pci function on which the * packet arrived on in addition to usual frame routing. * This is helpful on bonding where both interfaces can have * the same MAC address. */ ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); /* Reroute all packets to our Interface. * They may have been routed to MPI firmware * due to WOL. */ value = ql_read32(qdev, MGMT_RCV_CFG); value &= ~MGMT_RCV_CFG_RM; mask = 0xffff0000; /* Sticky reg needs clearing due to WOL. */ ql_write32(qdev, MGMT_RCV_CFG, mask); ql_write32(qdev, MGMT_RCV_CFG, mask | value); /* Default WOL is enable on Mezz cards */ if (qdev->pdev->subsystem_device == 0x0068 || qdev->pdev->subsystem_device == 0x0180) qdev->wol = WAKE_MAGIC; /* Start up the rx queues. */ for (i = 0; i < qdev->rx_ring_count; i++) { status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to start rx ring[%d].\n", i); return status; } } /* If there is more than one inbound completion queue * then download a RICB to configure RSS. */ if (qdev->rss_ring_count > 1) { status = ql_start_rss(qdev); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); return status; } } /* Start up the tx queues. 
*/ for (i = 0; i < qdev->tx_ring_count; i++) { status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to start tx ring[%d].\n", i); return status; } } /* Initialize the port and set the max framesize. */ status = qdev->nic_ops->port_initialize(qdev); if (status) netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); /* Set up the MAC address and frame routing filter. */ status = ql_cam_route_initialize(qdev); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init CAM/Routing tables.\n"); return status; } /* Start NAPI for the RSS queues. */ for (i = 0; i < qdev->rss_ring_count; i++) napi_enable(&qdev->rx_ring[i].napi); return status; } /* Issue soft reset to chip. */ static int ql_adapter_reset(struct ql_adapter *qdev) { u32 value; int status = 0; unsigned long end_jiffies; /* Clear all the entries in the routing table. */ status = ql_clear_routing_entries(qdev); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); return status; } end_jiffies = jiffies + max((unsigned long)1, usecs_to_jiffies(30)); /* Check if bit is set then skip the mailbox command and * clear the bit, else we are in normal reset process. */ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { /* Stop management traffic. */ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); /* Wait for the NIC and MGMNT FIFOs to empty. */ ql_wait_fifo_empty(qdev); } else clear_bit(QL_ASIC_RECOVERY, &qdev->flags); ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); do { value = ql_read32(qdev, RST_FO); if ((value & RST_FO_FR) == 0) break; cpu_relax(); } while (time_before(jiffies, end_jiffies)); if (value & RST_FO_FR) { netif_err(qdev, ifdown, qdev->ndev, "ETIMEDOUT!!! errored out of resetting the chip!\n"); status = -ETIMEDOUT; } /* Resume management traffic. 
*/ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); return status; } static void ql_display_dev_info(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); netif_info(qdev, probe, qdev->ndev, "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " "XG Roll = %d, XG Rev = %d.\n", qdev->func, qdev->port, qdev->chip_rev_id & 0x0000000f, qdev->chip_rev_id >> 4 & 0x0000000f, qdev->chip_rev_id >> 8 & 0x0000000f, qdev->chip_rev_id >> 12 & 0x0000000f); netif_info(qdev, probe, qdev->ndev, "MAC address %pM\n", ndev->dev_addr); } static int ql_wol(struct ql_adapter *qdev) { int status = 0; u32 wol = MB_WOL_DISABLE; /* The CAM is still intact after a reset, but if we * are doing WOL, then we may need to program the * routing regs. We would also need to issue the mailbox * commands to instruct the MPI what to do per the ethtool * settings. */ if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) { netif_err(qdev, ifdown, qdev->ndev, "Unsupported WOL paramter. qdev->wol = 0x%x.\n", qdev->wol); return -EINVAL; } if (qdev->wol & WAKE_MAGIC) { status = ql_mb_wol_set_magic(qdev, 1); if (status) { netif_err(qdev, ifdown, qdev->ndev, "Failed to set magic packet on %s.\n", qdev->ndev->name); return status; } else netif_info(qdev, drv, qdev->ndev, "Enabled magic packet successfully on %s.\n", qdev->ndev->name); wol |= MB_WOL_MAGIC_PKT; } if (qdev->wol) { wol |= MB_WOL_MODE_ON; status = ql_mb_wol_mode(qdev, wol); netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x) on %s\n", (status == 0) ? "Successfully set" : "Failed", wol, qdev->ndev->name); } return status; } static void ql_cancel_all_work_sync(struct ql_adapter *qdev) { /* Don't kill the reset worker thread if we * are in the process of recovery. 
*/ if (test_bit(QL_ADAPTER_UP, &qdev->flags)) cancel_delayed_work_sync(&qdev->asic_reset_work); cancel_delayed_work_sync(&qdev->mpi_reset_work); cancel_delayed_work_sync(&qdev->mpi_work); cancel_delayed_work_sync(&qdev->mpi_idc_work); cancel_delayed_work_sync(&qdev->mpi_core_to_log); cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); } static int ql_adapter_down(struct ql_adapter *qdev) { int i, status = 0; ql_link_off(qdev); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) napi_disable(&qdev->rx_ring[i].napi); clear_bit(QL_ADAPTER_UP, &qdev->flags); ql_disable_interrupts(qdev); ql_tx_ring_clean(qdev); /* Call netif_napi_del() from common point. */ for (i = 0; i < qdev->rss_ring_count; i++) netif_napi_del(&qdev->rx_ring[i].napi); status = ql_adapter_reset(qdev); if (status) netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", qdev->func); ql_free_rx_buffers(qdev); return status; } static int ql_adapter_up(struct ql_adapter *qdev) { int err = 0; err = ql_adapter_initialize(qdev); if (err) { netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); goto err_init; } set_bit(QL_ADAPTER_UP, &qdev->flags); ql_alloc_rx_buffers(qdev); /* If the port is initialized and the * link is up the turn on the carrier. */ if ((ql_read32(qdev, STS) & qdev->port_init) && (ql_read32(qdev, STS) & qdev->port_link_up)) ql_link_on(qdev); /* Restore rx mode. */ clear_bit(QL_ALLMULTI, &qdev->flags); clear_bit(QL_PROMISCUOUS, &qdev->flags); qlge_set_multicast_list(qdev->ndev); /* Restore vlan setting. 
*/ qlge_restore_vlan(qdev); ql_enable_interrupts(qdev); ql_enable_all_completion_interrupts(qdev); netif_tx_start_all_queues(qdev->ndev); return 0; err_init: ql_adapter_reset(qdev); return err; } static void ql_release_adapter_resources(struct ql_adapter *qdev) { ql_free_mem_resources(qdev); ql_free_irq(qdev); } static int ql_get_adapter_resources(struct ql_adapter *qdev) { int status = 0; if (ql_alloc_mem_resources(qdev)) { netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); return -ENOMEM; } status = ql_request_irq(qdev); return status; } static int qlge_close(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); /* If we hit pci_channel_io_perm_failure * failure condition, then we already * brought the adapter down. */ if (test_bit(QL_EEH_FATAL, &qdev->flags)) { netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); clear_bit(QL_EEH_FATAL, &qdev->flags); return 0; } /* * Wait for device to recover from a reset. * (Rarely happens, but possible.) */ while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(1); ql_adapter_down(qdev); ql_release_adapter_resources(qdev); return 0; } static int ql_configure_rings(struct ql_adapter *qdev) { int i; struct rx_ring *rx_ring; struct tx_ring *tx_ring; int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; qdev->lbq_buf_order = get_order(lbq_buf_len); /* In a perfect world we have one RSS ring for each CPU * and each has it's own vector. To do that we ask for * cpu_cnt vectors. ql_enable_msix() will adjust the * vector count to what we actually get. We then * allocate an RSS ring for each. * Essentially, we are doing min(cpu_count, msix_vector_count). */ qdev->intr_count = cpu_cnt; ql_enable_msix(qdev); /* Adjust the RSS ring count to the actual vector count. 
*/ qdev->rss_ring_count = qdev->intr_count; qdev->tx_ring_count = cpu_cnt; qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; for (i = 0; i < qdev->tx_ring_count; i++) { tx_ring = &qdev->tx_ring[i]; memset((void *)tx_ring, 0, sizeof(*tx_ring)); tx_ring->qdev = qdev; tx_ring->wq_id = i; tx_ring->wq_len = qdev->tx_ring_size; tx_ring->wq_size = tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); /* * The completion queue ID for the tx rings start * immediately after the rss rings. */ tx_ring->cq_id = qdev->rss_ring_count + i; } for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; memset((void *)rx_ring, 0, sizeof(*rx_ring)); rx_ring->qdev = qdev; rx_ring->cq_id = i; rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ if (i < qdev->rss_ring_count) { /* * Inbound (RSS) queues. */ rx_ring->cq_len = qdev->rx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq_len = NUM_LARGE_BUFFERS; rx_ring->lbq_size = rx_ring->lbq_len * sizeof(__le64); rx_ring->lbq_buf_size = (u16)lbq_buf_len; rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring->sbq_size = rx_ring->sbq_len * sizeof(__le64); rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; rx_ring->type = RX_Q; } else { /* * Outbound queue handles outbound completions only. */ /* outbound cq is same size as tx_ring it services. 
*/ rx_ring->cq_len = qdev->tx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq_len = 0; rx_ring->lbq_size = 0; rx_ring->lbq_buf_size = 0; rx_ring->sbq_len = 0; rx_ring->sbq_size = 0; rx_ring->sbq_buf_size = 0; rx_ring->type = TX_Q; } } return 0; } static int qlge_open(struct net_device *ndev) { int err = 0; struct ql_adapter *qdev = netdev_priv(ndev); err = ql_adapter_reset(qdev); if (err) return err; err = ql_configure_rings(qdev); if (err) return err; err = ql_get_adapter_resources(qdev); if (err) goto error_up; err = ql_adapter_up(qdev); if (err) goto error_up; return err; error_up: ql_release_adapter_resources(qdev); return err; } static int ql_change_rx_buffers(struct ql_adapter *qdev) { struct rx_ring *rx_ring; int i, status; u32 lbq_buf_len; /* Wait for an outstanding reset to complete. */ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { int i = 3; while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { netif_err(qdev, ifup, qdev->ndev, "Waiting for adapter UP...\n"); ssleep(1); } if (!i) { netif_err(qdev, ifup, qdev->ndev, "Timed out waiting for adapter UP\n"); return -ETIMEDOUT; } } status = ql_adapter_down(qdev); if (status) goto error; /* Get the new rx buffer size. */ lbq_buf_len = (qdev->ndev->mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; qdev->lbq_buf_order = get_order(lbq_buf_len); for (i = 0; i < qdev->rss_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; /* Set the new size. 
*/ rx_ring->lbq_buf_size = lbq_buf_len; } status = ql_adapter_up(qdev); if (status) goto error; return status; error: netif_alert(qdev, ifup, qdev->ndev, "Driver up/down cycle failed, closing device.\n"); set_bit(QL_ADAPTER_UP, &qdev->flags); dev_close(qdev->ndev); return status; } static int qlge_change_mtu(struct net_device *ndev, int new_mtu) { struct ql_adapter *qdev = netdev_priv(ndev); int status; if (ndev->mtu == 1500 && new_mtu == 9000) { netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); } else if (ndev->mtu == 9000 && new_mtu == 1500) { netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); } else return -EINVAL; queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 3*HZ); ndev->mtu = new_mtu; if (!netif_running(qdev->ndev)) { return 0; } status = ql_change_rx_buffers(qdev); if (status) { netif_err(qdev, ifup, qdev->ndev, "Changing MTU failed.\n"); } return status; } static struct net_device_stats *qlge_get_stats(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); struct rx_ring *rx_ring = &qdev->rx_ring[0]; struct tx_ring *tx_ring = &qdev->tx_ring[0]; unsigned long pkts, mcast, dropped, errors, bytes; int i; /* Get RX stats. */ pkts = mcast = dropped = errors = bytes = 0; for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { pkts += rx_ring->rx_packets; bytes += rx_ring->rx_bytes; dropped += rx_ring->rx_dropped; errors += rx_ring->rx_errors; mcast += rx_ring->rx_multicast; } ndev->stats.rx_packets = pkts; ndev->stats.rx_bytes = bytes; ndev->stats.rx_dropped = dropped; ndev->stats.rx_errors = errors; ndev->stats.multicast = mcast; /* Get TX stats. 
*/ pkts = errors = bytes = 0; for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { pkts += tx_ring->tx_packets; bytes += tx_ring->tx_bytes; errors += tx_ring->tx_errors; } ndev->stats.tx_packets = pkts; ndev->stats.tx_bytes = bytes; ndev->stats.tx_errors = errors; return &ndev->stats; } static void qlge_set_multicast_list(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); struct netdev_hw_addr *ha; int i, status; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return; /* * Set or clear promiscuous mode if a * transition is taking place. */ if (ndev->flags & IFF_PROMISC) { if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { netif_err(qdev, hw, qdev->ndev, "Failed to set promiscuous mode.\n"); } else { set_bit(QL_PROMISCUOUS, &qdev->flags); } } } else { if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { netif_err(qdev, hw, qdev->ndev, "Failed to clear promiscuous mode.\n"); } else { clear_bit(QL_PROMISCUOUS, &qdev->flags); } } } /* * Set or clear all multicast mode if a * transition is taking place. 
*/ if ((ndev->flags & IFF_ALLMULTI) || (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { if (!test_bit(QL_ALLMULTI, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { netif_err(qdev, hw, qdev->ndev, "Failed to set all-multi mode.\n"); } else { set_bit(QL_ALLMULTI, &qdev->flags); } } } else { if (test_bit(QL_ALLMULTI, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { netif_err(qdev, hw, qdev->ndev, "Failed to clear all-multi mode.\n"); } else { clear_bit(QL_ALLMULTI, &qdev->flags); } } } if (!netdev_mc_empty(ndev)) { status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) goto exit; i = 0; netdev_for_each_mc_addr(ha, ndev) { if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, MAC_ADDR_TYPE_MULTI_MAC, i)) { netif_err(qdev, hw, qdev->ndev, "Failed to loadmulticast address.\n"); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); goto exit; } i++; } ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); if (ql_set_routing_reg (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { netif_err(qdev, hw, qdev->ndev, "Failed to set multicast match mode.\n"); } else { set_bit(QL_ALLMULTI, &qdev->flags); } } exit: ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } static int qlge_set_mac_address(struct net_device *ndev, void *p) { struct ql_adapter *qdev = netdev_priv(ndev); struct sockaddr *addr = p; int status; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); /* Update local copy of current mac address. 
*/ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); if (status) netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); return status; } static void qlge_tx_timeout(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); ql_queue_asic_error(qdev); } static void ql_asic_reset_work(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, asic_reset_work.work); int status; rtnl_lock(); status = ql_adapter_down(qdev); if (status) goto error; status = ql_adapter_up(qdev); if (status) goto error; /* Restore rx mode. */ clear_bit(QL_ALLMULTI, &qdev->flags); clear_bit(QL_PROMISCUOUS, &qdev->flags); qlge_set_multicast_list(qdev->ndev); rtnl_unlock(); return; error: netif_alert(qdev, ifup, qdev->ndev, "Driver up/down cycle failed, closing device\n"); set_bit(QL_ADAPTER_UP, &qdev->flags); dev_close(qdev->ndev); rtnl_unlock(); } static const struct nic_operations qla8012_nic_ops = { .get_flash = ql_get_8012_flash_params, .port_initialize = ql_8012_port_initialize, }; static const struct nic_operations qla8000_nic_ops = { .get_flash = ql_get_8000_flash_params, .port_initialize = ql_8000_port_initialize, }; /* Find the pcie function number for the other NIC * on this chip. Since both NIC functions share a * common firmware we have the lowest enabled function * do any common work. Examples would be resetting * after a fatal firmware error, or doing a firmware * coredump. 
*/ static int ql_get_alt_pcie_func(struct ql_adapter *qdev) { int status = 0; u32 temp; u32 nic_func1, nic_func2; status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &temp); if (status) return status; nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK); nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK); if (qdev->func == nic_func1) qdev->alt_func = nic_func2; else if (qdev->func == nic_func2) qdev->alt_func = nic_func1; else status = -EIO; return status; } static int ql_get_board_info(struct ql_adapter *qdev) { int status; qdev->func = (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; if (qdev->func > 3) return -EIO; status = ql_get_alt_pcie_func(qdev); if (status) return status; qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1; if (qdev->port) { qdev->xg_sem_mask = SEM_XGMAC1_MASK; qdev->port_link_up = STS_PL1; qdev->port_init = STS_PI1; qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; } else { qdev->xg_sem_mask = SEM_XGMAC0_MASK; qdev->port_link_up = STS_PL0; qdev->port_init = STS_PI0; qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; } qdev->chip_rev_id = ql_read32(qdev, REV_ID); qdev->device_id = qdev->pdev->device; if (qdev->device_id == QLGE_DEVICE_ID_8012) qdev->nic_ops = &qla8012_nic_ops; else if (qdev->device_id == QLGE_DEVICE_ID_8000) qdev->nic_ops = &qla8000_nic_ops; return status; } static void ql_release_all(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); if (qdev->workqueue) { destroy_workqueue(qdev->workqueue); qdev->workqueue = NULL; } if (qdev->reg_base) iounmap(qdev->reg_base); if (qdev->doorbell_area) iounmap(qdev->doorbell_area); vfree(qdev->mpi_coredump); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); } static int __devinit ql_init_device(struct pci_dev 
*pdev, struct net_device *ndev, int cards_found) { struct ql_adapter *qdev = netdev_priv(ndev); int err = 0; memset((void *)qdev, 0, sizeof(*qdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "PCI device enable failed.\n"); return err; } qdev->ndev = ndev; qdev->pdev = pdev; pci_set_drvdata(pdev, ndev); /* Set PCIe read request size */ err = pcie_set_readrq(pdev, 4096); if (err) { dev_err(&pdev->dev, "Set readrq failed.\n"); goto err_out1; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "PCI region request failed.\n"); return err; } pci_set_master(pdev); if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { set_bit(QL_DMA64, &qdev->flags); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } if (err) { dev_err(&pdev->dev, "No usable DMA configuration.\n"); goto err_out2; } /* Set PCIe reset type for EEH to fundamental. 
*/ pdev->needs_freset = 1; pci_save_state(pdev); qdev->reg_base = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!qdev->reg_base) { dev_err(&pdev->dev, "Register mapping failed.\n"); err = -ENOMEM; goto err_out2; } qdev->doorbell_area_size = pci_resource_len(pdev, 3); qdev->doorbell_area = ioremap_nocache(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3)); if (!qdev->doorbell_area) { dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); err = -ENOMEM; goto err_out2; } err = ql_get_board_info(qdev); if (err) { dev_err(&pdev->dev, "Register access failed.\n"); err = -EIO; goto err_out2; } qdev->msg_enable = netif_msg_init(debug, default_msg); spin_lock_init(&qdev->hw_lock); spin_lock_init(&qdev->stats_lock); if (qlge_mpi_coredump) { qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump)); if (qdev->mpi_coredump == NULL) { dev_err(&pdev->dev, "Coredump alloc failed.\n"); err = -ENOMEM; goto err_out2; } if (qlge_force_coredump) set_bit(QL_FRC_COREDUMP, &qdev->flags); } /* make sure the EEPROM is good */ err = qdev->nic_ops->get_flash(qdev); if (err) { dev_err(&pdev->dev, "Invalid FLASH.\n"); goto err_out2; } memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); /* Keep local copy of current mac address. */ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); /* Set up the default ring sizes. */ qdev->tx_ring_size = NUM_TX_RING_ENTRIES; qdev->rx_ring_size = NUM_RX_RING_ENTRIES; /* Set up the coalescing parameters. */ qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; /* * Set up the operating parameters. 
*/ qdev->workqueue = create_singlethread_workqueue(ndev->name); INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); init_completion(&qdev->ide_completion); mutex_init(&qdev->mpi_mutex); if (!cards_found) { dev_info(&pdev->dev, "%s\n", DRV_STRING); dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", DRV_NAME, DRV_VERSION); } return 0; err_out2: ql_release_all(pdev); err_out1: pci_disable_device(pdev); return err; } static const struct net_device_ops qlge_netdev_ops = { .ndo_open = qlge_open, .ndo_stop = qlge_close, .ndo_start_xmit = qlge_send, .ndo_change_mtu = qlge_change_mtu, .ndo_get_stats = qlge_get_stats, .ndo_set_rx_mode = qlge_set_multicast_list, .ndo_set_mac_address = qlge_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = qlge_tx_timeout, .ndo_fix_features = qlge_fix_features, .ndo_set_features = qlge_set_features, .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, }; static void ql_timer(unsigned long data) { struct ql_adapter *qdev = (struct ql_adapter *)data; u32 var = 0; var = ql_read32(qdev, STS); if (pci_channel_offline(qdev->pdev)) { netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); return; } mod_timer(&qdev->timer, jiffies + (5*HZ)); } static int __devinit qlge_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { struct net_device *ndev = NULL; struct ql_adapter *qdev = NULL; static int cards_found = 0; int err = 0; ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), min(MAX_CPUS, (int)num_online_cpus())); if (!ndev) return -ENOMEM; err = ql_init_device(pdev, ndev, cards_found); if (err < 0) { free_netdev(ndev); return err; } qdev = 
netdev_priv(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; if (test_bit(QL_DMA64, &qdev->flags)) ndev->features |= NETIF_F_HIGHDMA; /* * Set up net_device structure. */ ndev->tx_queue_len = qdev->tx_ring_size; ndev->irq = pdev->irq; ndev->netdev_ops = &qlge_netdev_ops; SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); ndev->watchdog_timeo = 10 * HZ; err = register_netdev(ndev); if (err) { dev_err(&pdev->dev, "net device registration failed.\n"); ql_release_all(pdev); pci_disable_device(pdev); return err; } /* Start up the timer to trigger EEH if * the bus goes dead */ init_timer_deferrable(&qdev->timer); qdev->timer.data = (unsigned long)qdev; qdev->timer.function = ql_timer; qdev->timer.expires = jiffies + (5*HZ); add_timer(&qdev->timer); ql_link_off(qdev); ql_display_dev_info(ndev); atomic_set(&qdev->lb_count, 0); cards_found++; return 0; } netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) { return qlge_send(skb, ndev); } int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) { return ql_clean_inbound_rx_ring(rx_ring, budget); } static void __devexit qlge_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); unregister_netdev(ndev); ql_release_all(pdev); pci_disable_device(pdev); free_netdev(ndev); } /* Clean up resources without touching hardware. 
*/ static void ql_eeh_close(struct net_device *ndev) { int i; struct ql_adapter *qdev = netdev_priv(ndev); if (netif_carrier_ok(ndev)) { netif_carrier_off(ndev); netif_stop_queue(ndev); } /* Disabling the timer */ del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) netif_napi_del(&qdev->rx_ring[i].napi); clear_bit(QL_ADAPTER_UP, &qdev->flags); ql_tx_ring_clean(qdev); ql_free_rx_buffers(qdev); ql_release_adapter_resources(qdev); } /* * This callback is called by the PCI subsystem whenever * a PCI bus error is detected. */ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: netif_device_detach(ndev); if (netif_running(ndev)) ql_eeh_close(ndev); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: dev_err(&pdev->dev, "%s: pci_channel_io_perm_failure.\n", __func__); ql_eeh_close(ndev); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; } /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /* * This callback is called after the PCI buss has been reset. * Basically, this tries to restart the card from scratch. * This is a shortened version of the device probe/discovery code, * it resembles the first-half of the () routine. 
*/ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); pdev->error_state = pci_channel_io_normal; pci_restore_state(pdev); if (pci_enable_device(pdev)) { netif_err(qdev, ifup, qdev->ndev, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); if (ql_adapter_reset(qdev)) { netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_RECOVERED; } static void qlge_io_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err = 0; if (netif_running(ndev)) { err = qlge_open(ndev); if (err) { netif_err(qdev, ifup, qdev->ndev, "Device initialization failed after reset.\n"); return; } } else { netif_err(qdev, ifup, qdev->ndev, "Device was not running prior to EEH.\n"); } mod_timer(&qdev->timer, jiffies + (5*HZ)); netif_device_attach(ndev); } static struct pci_error_handlers qlge_err_handler = { .error_detected = qlge_io_error_detected, .slot_reset = qlge_io_slot_reset, .resume = qlge_io_resume, }; static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err; netif_device_detach(ndev); del_timer_sync(&qdev->timer); if (netif_running(ndev)) { err = ql_adapter_down(qdev); if (!err) return err; } ql_wol(qdev); err = pci_save_state(pdev); if (err) return err; pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } #ifdef CONFIG_PM static int qlge_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { netif_err(qdev, ifup, qdev->ndev, 
"Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); if (netif_running(ndev)) { err = ql_adapter_up(qdev); if (err) return err; } mod_timer(&qdev->timer, jiffies + (5*HZ)); netif_device_attach(ndev); return 0; } #endif /* CONFIG_PM */ static void qlge_shutdown(struct pci_dev *pdev) { qlge_suspend(pdev, PMSG_SUSPEND); } static struct pci_driver qlge_driver = { .name = DRV_NAME, .id_table = qlge_pci_tbl, .probe = qlge_probe, .remove = __devexit_p(qlge_remove), #ifdef CONFIG_PM .suspend = qlge_suspend, .resume = qlge_resume, #endif .shutdown = qlge_shutdown, .err_handler = &qlge_err_handler }; static int __init qlge_init_module(void) { return pci_register_driver(&qlge_driver); } static void __exit qlge_exit(void) { pci_unregister_driver(&qlge_driver); } module_init(qlge_init_module); module_exit(qlge_exit);
gpl-2.0
starlightknight/android_kernel_samsung_matissewifi
arch/arm/common/via82c505.c
4807
1902
#include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/io.h> #include <asm/mach/pci.h> #define MAX_SLOTS 7 #define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) static int via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { outl(CONFIG_CMD(bus,devfn,where),0xCF8); switch (size) { case 1: *value=inb(0xCFC + (where&3)); break; case 2: *value=inw(0xCFC + (where&2)); break; case 4: *value=inl(0xCFC); break; } return PCIBIOS_SUCCESSFUL; } static int via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { outl(CONFIG_CMD(bus,devfn,where),0xCF8); switch (size) { case 1: outb(value, 0xCFC + (where&3)); break; case 2: outw(value, 0xCFC + (where&2)); break; case 4: outl(value, 0xCFC); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops via82c505_ops = { .read = via82c505_read_config, .write = via82c505_write_config, }; void __init via82c505_preinit(void) { printk(KERN_DEBUG "PCI: VIA 82c505\n"); if (!request_region(0xA8,2,"via config")) { printk(KERN_WARNING"VIA 82c505: Unable to request region 0xA8\n"); return; } if (!request_region(0xCF8,8,"pci config")) { printk(KERN_WARNING"VIA 82c505: Unable to request region 0xCF8\n"); release_region(0xA8, 2); return; } /* Enable compatible Mode */ outb(0x96,0xA8); outb(0x18,0xA9); outb(0x93,0xA8); outb(0xd0,0xA9); } int __init via82c505_setup(int nr, struct pci_sys_data *sys) { return (nr == 0); } struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata) { if (nr == 0) return pci_scan_root_bus(NULL, 0, &via82c505_ops, sysdata, &sysdata->resources); return NULL; }
gpl-2.0
Buckmarble/Elite_M8
sound/soc/imx/eukrea-tlv320.c
4807
4633
/* * eukrea-tlv320.c -- SoC audio for eukrea_cpuimxXX in I2S mode * * Copyright 2010 Eric Bénard, Eukréa Electromatique <eric@eukrea.com> * * based on sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c * which is Copyright 2009 Simtec Electronics * and on sound/soc/imx/phycore-ac97.c which is * Copyright 2009 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "../codecs/tlv320aic23.h" #include "imx-ssi.h" #include "imx-audmux.h" #define CODEC_CLOCK 12000000 static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret; ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret) { pr_err("%s: failed set cpu dai format\n", __func__); return ret; } ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret) { pr_err("%s: failed set codec dai format\n", __func__); return ret; } ret = snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_OUT); if (ret) { pr_err("%s: failed setting codec sysclk\n", __func__); return ret; } snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0); ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0, SND_SOC_CLOCK_IN); if (ret) { pr_err("can't set CPU system clock IMX_SSP_SYS_CLK\n"); return ret; } return 0; } static struct snd_soc_ops eukrea_tlv320_snd_ops 
= { .hw_params = eukrea_tlv320_hw_params, }; static struct snd_soc_dai_link eukrea_tlv320_dai = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .codec_dai_name = "tlv320aic23-hifi", .platform_name = "imx-fiq-pcm-audio.0", .codec_name = "tlv320aic23-codec.0-001a", .cpu_dai_name = "imx-ssi.0", .ops = &eukrea_tlv320_snd_ops, }; static struct snd_soc_card eukrea_tlv320 = { .name = "cpuimx-audio", .owner = THIS_MODULE, .dai_link = &eukrea_tlv320_dai, .num_links = 1, }; static struct platform_device *eukrea_tlv320_snd_device; static int __init eukrea_tlv320_init(void) { int ret; int int_port = 0, ext_port; if (machine_is_eukrea_cpuimx27()) { imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, IMX_AUDMUX_V1_PCR_SYN | IMX_AUDMUX_V1_PCR_TFSDIR | IMX_AUDMUX_V1_PCR_TCLKDIR | IMX_AUDMUX_V1_PCR_RFSDIR | IMX_AUDMUX_V1_PCR_RCLKDIR | IMX_AUDMUX_V1_PCR_TFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) | IMX_AUDMUX_V1_PCR_RFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) | IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) ); imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR3_SSI_PINS_4, IMX_AUDMUX_V1_PCR_SYN | IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0) ); } else if (machine_is_eukrea_cpuimx25sd() || machine_is_eukrea_cpuimx35sd() || machine_is_eukrea_cpuimx51sd()) { ext_port = machine_is_eukrea_cpuimx25sd() ? 4 : 3; imx_audmux_v2_configure_port(int_port, IMX_AUDMUX_V2_PTCR_SYN | IMX_AUDMUX_V2_PTCR_TFSDIR | IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) | IMX_AUDMUX_V2_PTCR_TCLKDIR | IMX_AUDMUX_V2_PTCR_TCSEL(ext_port), IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port) ); imx_audmux_v2_configure_port(ext_port, IMX_AUDMUX_V2_PTCR_SYN, IMX_AUDMUX_V2_PDCR_RXDSEL(int_port) ); } else { /* return happy. 
We might run on a totally different machine */ return 0; } eukrea_tlv320_snd_device = platform_device_alloc("soc-audio", -1); if (!eukrea_tlv320_snd_device) return -ENOMEM; platform_set_drvdata(eukrea_tlv320_snd_device, &eukrea_tlv320); ret = platform_device_add(eukrea_tlv320_snd_device); if (ret) { printk(KERN_ERR "ASoC: Platform device allocation failed\n"); platform_device_put(eukrea_tlv320_snd_device); } return ret; } static void __exit eukrea_tlv320_exit(void) { platform_device_unregister(eukrea_tlv320_snd_device); } module_init(eukrea_tlv320_init); module_exit(eukrea_tlv320_exit); MODULE_AUTHOR("Eric Bénard <eric@eukrea.com>"); MODULE_DESCRIPTION("CPUIMX ALSA SoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
crdroid-devices/android_kernel_htc_msm8960
drivers/gpio/gpio-ep93xx.c
4807
10928
/* * Generic EP93xx GPIO handling * * Copyright (c) 2008 Ryan Mallon * Copyright (c) 2011 H Hartley Sweeten <hsweeten@visionengravers.com> * * Based on code originally from: * linux/arch/arm/mach-ep93xx/core.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/basic_mmio_gpio.h> #include <mach/hardware.h> #include <mach/gpio-ep93xx.h> #define irq_to_gpio(irq) ((irq) - gpio_to_irq(0)) struct ep93xx_gpio { void __iomem *mmio_base; struct bgpio_chip bgc[8]; }; /************************************************************************* * Interrupt handling for EP93xx on-chip GPIOs *************************************************************************/ static unsigned char gpio_int_unmasked[3]; static unsigned char gpio_int_enabled[3]; static unsigned char gpio_int_type1[3]; static unsigned char gpio_int_type2[3]; static unsigned char gpio_int_debounce[3]; /* Port ordering is: A B F */ static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c }; static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 }; static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; static void ep93xx_gpio_update_int_params(unsigned port) { BUG_ON(port > 2); __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port])); __raw_writeb(gpio_int_type2[port], EP93XX_GPIO_REG(int_type2_register_offset[port])); __raw_writeb(gpio_int_type1[port], EP93XX_GPIO_REG(int_type1_register_offset[port])); __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port], EP93XX_GPIO_REG(int_en_register_offset[port])); } static 
void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) { int line = irq_to_gpio(irq); int port = line >> 3; int port_mask = 1 << (line & 7); if (enable) gpio_int_debounce[port] |= port_mask; else gpio_int_debounce[port] &= ~port_mask; __raw_writeb(gpio_int_debounce[port], EP93XX_GPIO_REG(int_debounce_register_offset[port])); } static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned char status; int i; status = __raw_readb(EP93XX_GPIO_A_INT_STATUS); for (i = 0; i < 8; i++) { if (status & (1 << i)) { int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i; generic_handle_irq(gpio_irq); } } status = __raw_readb(EP93XX_GPIO_B_INT_STATUS); for (i = 0; i < 8; i++) { if (status & (1 << i)) { int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i; generic_handle_irq(gpio_irq); } } } static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc) { /* * map discontiguous hw irq range to continuous sw irq range: * * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7}) */ int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */ int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx; generic_handle_irq(gpio_irq); } static void ep93xx_gpio_irq_ack(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; int port_mask = 1 << (line & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { gpio_int_type2[port] ^= port_mask; /* switch edge direction */ ep93xx_gpio_update_int_params(port); } __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port])); } static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; int port_mask = 1 << (line & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) gpio_int_type2[port] ^= port_mask; /* switch edge direction */ gpio_int_unmasked[port] &= ~port_mask; ep93xx_gpio_update_int_params(port); __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port])); } static void 
ep93xx_gpio_irq_mask(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; gpio_int_unmasked[port] &= ~(1 << (line & 7)); ep93xx_gpio_update_int_params(port); } static void ep93xx_gpio_irq_unmask(struct irq_data *d) { int line = irq_to_gpio(d->irq); int port = line >> 3; gpio_int_unmasked[port] |= 1 << (line & 7); ep93xx_gpio_update_int_params(port); } /* * gpio_int_type1 controls whether the interrupt is level (0) or * edge (1) triggered, while gpio_int_type2 controls whether it * triggers on low/falling (0) or high/rising (1). */ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) { const int gpio = irq_to_gpio(d->irq); const int port = gpio >> 3; const int port_mask = 1 << (gpio & 7); irq_flow_handler_t handler; gpio_direction_input(gpio); switch (type) { case IRQ_TYPE_EDGE_RISING: gpio_int_type1[port] |= port_mask; gpio_int_type2[port] |= port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_EDGE_FALLING: gpio_int_type1[port] |= port_mask; gpio_int_type2[port] &= ~port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: gpio_int_type1[port] &= ~port_mask; gpio_int_type2[port] |= port_mask; handler = handle_level_irq; break; case IRQ_TYPE_LEVEL_LOW: gpio_int_type1[port] &= ~port_mask; gpio_int_type2[port] &= ~port_mask; handler = handle_level_irq; break; case IRQ_TYPE_EDGE_BOTH: gpio_int_type1[port] |= port_mask; /* set initial polarity based on current input level */ if (gpio_get_value(gpio)) gpio_int_type2[port] &= ~port_mask; /* falling */ else gpio_int_type2[port] |= port_mask; /* rising */ handler = handle_edge_irq; break; default: return -EINVAL; } __irq_set_handler_locked(d->irq, handler); gpio_int_enabled[port] |= port_mask; ep93xx_gpio_update_int_params(port); return 0; } static struct irq_chip ep93xx_gpio_irq_chip = { .name = "GPIO", .irq_ack = ep93xx_gpio_irq_ack, .irq_mask_ack = ep93xx_gpio_irq_mask_ack, .irq_mask = ep93xx_gpio_irq_mask, .irq_unmask = ep93xx_gpio_irq_unmask, .irq_set_type = 
ep93xx_gpio_irq_type, }; static void ep93xx_gpio_init_irq(void) { int gpio_irq; for (gpio_irq = gpio_to_irq(0); gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip, handle_level_irq); set_irq_flags(gpio_irq, IRQF_VALID); } irq_set_chained_handler(IRQ_EP93XX_GPIO_AB, ep93xx_gpio_ab_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX, ep93xx_gpio_f_irq_handler); irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX, ep93xx_gpio_f_irq_handler); } /************************************************************************* * gpiolib interface for EP93xx on-chip GPIOs *************************************************************************/ struct ep93xx_gpio_bank { const char *label; int data; int dir; int base; bool has_debounce; }; #define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _debounce) \ { \ .label = _label, \ .data = _data, \ .dir = _dir, \ .base = _base, \ .has_debounce = _debounce, \ } static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = { EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true), EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true), EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false), EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false), EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false), EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true), EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false), EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false), }; static int ep93xx_gpio_set_debounce(struct gpio_chip *chip, unsigned offset, unsigned debounce) { int gpio = chip->base + offset; int irq = gpio_to_irq(gpio); if (irq 
< 0) return -EINVAL; ep93xx_gpio_int_debounce(irq, debounce ? true : false); return 0; } /* * Map GPIO A0..A7 (0..7) to irq 64..71, * B0..B7 (7..15) to irq 72..79, and * F0..F7 (16..24) to irq 80..87. */ static int ep93xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { int gpio = chip->base + offset; if (gpio > EP93XX_GPIO_LINE_MAX_IRQ) return -EINVAL; return 64 + gpio; } static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev, void __iomem *mmio_base, struct ep93xx_gpio_bank *bank) { void __iomem *data = mmio_base + bank->data; void __iomem *dir = mmio_base + bank->dir; int err; err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, false); if (err) return err; bgc->gc.label = bank->label; bgc->gc.base = bank->base; if (bank->has_debounce) { bgc->gc.set_debounce = ep93xx_gpio_set_debounce; bgc->gc.to_irq = ep93xx_gpio_to_irq; } return gpiochip_add(&bgc->gc); } static int __devinit ep93xx_gpio_probe(struct platform_device *pdev) { struct ep93xx_gpio *ep93xx_gpio; struct resource *res; void __iomem *mmio; int i; int ret; ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL); if (!ep93xx_gpio) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENXIO; goto exit_free; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { ret = -EBUSY; goto exit_free; } mmio = ioremap(res->start, resource_size(res)); if (!mmio) { ret = -ENXIO; goto exit_release; } ep93xx_gpio->mmio_base = mmio; for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank)) dev_warn(&pdev->dev, "Unable to add gpio bank %s\n", bank->label); } ep93xx_gpio_init_irq(); return 0; exit_release: release_mem_region(res->start, resource_size(res)); exit_free: kfree(ep93xx_gpio); dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret); return ret; } static struct 
platform_driver ep93xx_gpio_driver = { .driver = { .name = "gpio-ep93xx", .owner = THIS_MODULE, }, .probe = ep93xx_gpio_probe, }; static int __init ep93xx_gpio_init(void) { return platform_driver_register(&ep93xx_gpio_driver); } postcore_initcall(ep93xx_gpio_init); MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com> " "H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_DESCRIPTION("EP93XX GPIO driver"); MODULE_LICENSE("GPL");
gpl-2.0
ZdrowyGosciu/kernel_g900f
drivers/staging/wlags49_h2/wl_cs.c
4807
13646
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file contains processing and initialization specific to Card Services * devices (PCMCIA, CF). * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright (c) 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * Disclaimer * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ******************************************************************************/ /******************************************************************************* * include files ******************************************************************************/ #include <wl_version.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/bitops.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/module.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <debug.h> #include <hcf.h> #include <dhf.h> #include <hcfdef.h> #include <wl_if.h> #include <wl_internal.h> #include <wl_util.h> #include <wl_main.h> #include <wl_netdev.h> #include <wl_cs.h> #include <wl_sysfs.h> 
/******************************************************************************* * global definitions ******************************************************************************/ #if DBG extern dbg_info_t *DbgInfo; #endif /* DBG */ /******************************************************************************* * wl_adapter_attach() ******************************************************************************* * * DESCRIPTION: * * Creates an instance of the driver, allocating local data structures for * one device. The device is registered with Card Services. * * PARAMETERS: * * none * * RETURNS: * * pointer to an allocated dev_link_t structure * NULL on failure * ******************************************************************************/ static int wl_adapter_attach(struct pcmcia_device *link) { struct net_device *dev; struct wl_private *lp; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_attach"); DBG_ENTER(DbgInfo); dev = wl_device_alloc(); if (dev == NULL) { DBG_ERROR(DbgInfo, "wl_device_alloc returned NULL\n"); return -ENOMEM; } link->resource[0]->end = HCF_NUM_IO_PORTS; link->resource[0]->flags= IO_DATA_PATH_WIDTH_16; link->config_flags |= CONF_ENABLE_IRQ; link->config_index = 5; link->config_regs = PRESENT_OPTION; link->priv = dev; lp = wl_priv(dev); lp->link = link; wl_adapter_insert(link); DBG_LEAVE(DbgInfo); return 0; } /* wl_adapter_attach */ /*============================================================================*/ static void wl_adapter_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_detach"); DBG_ENTER(DbgInfo); DBG_PARAM(DbgInfo, "link", "0x%p", link); wl_adapter_release(link); if (dev) { unregister_wlags_sysfs(dev); unregister_netdev(dev); } wl_device_dealloc(dev); DBG_LEAVE(DbgInfo); } /* wl_adapter_detach */ 
/*============================================================================*/ void wl_adapter_release(struct pcmcia_device *link) { DBG_FUNC("wl_adapter_release"); DBG_ENTER(DbgInfo); DBG_PARAM(DbgInfo, "link", "0x%p", link); /* Stop hardware */ wl_remove(link->priv); pcmcia_disable_device(link); DBG_LEAVE(DbgInfo); } /* wl_adapter_release */ /*============================================================================*/ static int wl_adapter_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; /* if (link->open) { */ netif_device_detach(dev); wl_suspend(dev); /* CHECK! pcmcia_release_configuration(link->handle); */ /* } */ return 0; } /* wl_adapter_suspend */ static int wl_adapter_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; wl_resume(dev); netif_device_attach(dev); return 0; } /* wl_adapter_resume */ void wl_adapter_insert(struct pcmcia_device *link) { struct net_device *dev; int ret; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_insert"); DBG_ENTER(DbgInfo); DBG_PARAM(DbgInfo, "link", "0x%p", link); dev = link->priv; /* Do we need to allocate an interrupt? 
*/ link->config_flags |= CONF_ENABLE_IRQ; link->io_lines = 6; ret = pcmcia_request_io(link); if (ret != 0) goto failed; ret = pcmcia_request_irq(link, (void *) wl_isr); if (ret != 0) goto failed; ret = pcmcia_enable_device(link); if (ret != 0) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev) != 0) { printk("%s: register_netdev() failed\n", MODULE_NAME); goto failed; } register_wlags_sysfs(dev); printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address" " %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr); DBG_LEAVE(DbgInfo); return; failed: wl_adapter_release(link); DBG_LEAVE(DbgInfo); return; } /* wl_adapter_insert */ /*============================================================================*/ /******************************************************************************* * wl_adapter_open() ******************************************************************************* * * DESCRIPTION: * * Open the device. * * PARAMETERS: * * dev - a pointer to a net_device structure representing the network * device to open. 
* * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_adapter_open(struct net_device *dev) { struct wl_private *lp = wl_priv(dev); struct pcmcia_device *link = lp->link; int result = 0; int hcf_status = HCF_SUCCESS; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_open"); DBG_ENTER(DbgInfo); DBG_PRINT("%s\n", VERSION_INFO); DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev); if (!pcmcia_dev_present(link)) { DBG_LEAVE(DbgInfo); return -ENODEV; } link->open++; hcf_status = wl_open(dev); if (hcf_status != HCF_SUCCESS) { link->open--; result = -ENODEV; } DBG_LEAVE(DbgInfo); return result; } /* wl_adapter_open */ /*============================================================================*/ /******************************************************************************* * wl_adapter_close() ******************************************************************************* * * DESCRIPTION: * * Close the device. * * PARAMETERS: * * dev - a pointer to a net_device structure representing the network * device to close. 
* * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_adapter_close(struct net_device *dev) { struct wl_private *lp = wl_priv(dev); struct pcmcia_device *link = lp->link; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_close"); DBG_ENTER(DbgInfo); DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev); if (link == NULL) { DBG_LEAVE(DbgInfo); return -ENODEV; } DBG_TRACE(DbgInfo, "%s: Shutting down adapter.\n", dev->name); wl_close(dev); link->open--; DBG_LEAVE(DbgInfo); return 0; } /* wl_adapter_close */ /*============================================================================*/ static const struct pcmcia_device_id wl_adapter_ids[] = { #if !((HCF_TYPE) & HCF_TYPE_HII5) PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0003), PCMCIA_DEVICE_PROD_ID12("Agere Systems", "Wireless PC Card Model 0110", 0x33103a9b, 0xe175b0dd), #else PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0004), PCMCIA_DEVICE_PROD_ID12("Linksys", "WCF54G_Wireless-G_CompactFlash_Card", 0x0733cc81, 0x98a599e1), #endif /* (HCF_TYPE) & HCF_TYPE_HII5 */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, wl_adapter_ids); static struct pcmcia_driver wlags49_driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, .probe = wl_adapter_attach, .remove = wl_adapter_detach, .id_table = wl_adapter_ids, .suspend = wl_adapter_suspend, .resume = wl_adapter_resume, }; /******************************************************************************* * wl_adapter_init_module() ******************************************************************************* * * DESCRIPTION: * * Called by init_module() to perform PCMCIA driver initialization. 
* * PARAMETERS: * * N/A * * RETURNS: * * 0 on success * -1 on error * ******************************************************************************/ int wl_adapter_init_module(void) { int ret; /*--------------------------------------------------------------------*/ DBG_FUNC("wl_adapter_init_module"); DBG_ENTER(DbgInfo); DBG_TRACE(DbgInfo, "wl_adapter_init_module() -- PCMCIA\n"); ret = pcmcia_register_driver(&wlags49_driver); DBG_LEAVE(DbgInfo); return ret; } /* wl_adapter_init_module */ /*============================================================================*/ /******************************************************************************* * wl_adapter_cleanup_module() ******************************************************************************* * * DESCRIPTION: * * Called by cleanup_module() to perform driver uninitialization. * * PARAMETERS: * * N/A * * RETURNS: * * N/A * ******************************************************************************/ void wl_adapter_cleanup_module(void) { DBG_FUNC("wl_adapter_cleanup_module"); DBG_ENTER(DbgInfo); DBG_TRACE(DbgInfo, "wl_adapter_cleanup_module() -- PCMCIA\n"); pcmcia_unregister_driver(&wlags49_driver); DBG_LEAVE(DbgInfo); return; } /* wl_adapter_cleanup_module */ /*============================================================================*/ /******************************************************************************* * wl_adapter_is_open() ******************************************************************************* * * DESCRIPTION: * * Check with Card Services to determine if this device is open. 
* * PARAMETERS: * * dev - a pointer to the net_device structure whose open status will be * checked * * RETURNS: * * nonzero if device is open * 0 otherwise * ******************************************************************************/ int wl_adapter_is_open(struct net_device *dev) { struct wl_private *lp = wl_priv(dev); struct pcmcia_device *link = lp->link; if (!pcmcia_dev_present(link)) return 0; return link->open; } /* wl_adapter_is_open */ /*============================================================================*/
gpl-2.0
HeliumRom/android_kernel_nubia_nx507j
arch/arm/common/via82c505.c
4807
1902
#include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/io.h> #include <asm/mach/pci.h> #define MAX_SLOTS 7 #define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) static int via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { outl(CONFIG_CMD(bus,devfn,where),0xCF8); switch (size) { case 1: *value=inb(0xCFC + (where&3)); break; case 2: *value=inw(0xCFC + (where&2)); break; case 4: *value=inl(0xCFC); break; } return PCIBIOS_SUCCESSFUL; } static int via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { outl(CONFIG_CMD(bus,devfn,where),0xCF8); switch (size) { case 1: outb(value, 0xCFC + (where&3)); break; case 2: outw(value, 0xCFC + (where&2)); break; case 4: outl(value, 0xCFC); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops via82c505_ops = { .read = via82c505_read_config, .write = via82c505_write_config, }; void __init via82c505_preinit(void) { printk(KERN_DEBUG "PCI: VIA 82c505\n"); if (!request_region(0xA8,2,"via config")) { printk(KERN_WARNING"VIA 82c505: Unable to request region 0xA8\n"); return; } if (!request_region(0xCF8,8,"pci config")) { printk(KERN_WARNING"VIA 82c505: Unable to request region 0xCF8\n"); release_region(0xA8, 2); return; } /* Enable compatible Mode */ outb(0x96,0xA8); outb(0x18,0xA9); outb(0x93,0xA8); outb(0xd0,0xA9); } int __init via82c505_setup(int nr, struct pci_sys_data *sys) { return (nr == 0); } struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata) { if (nr == 0) return pci_scan_root_bus(NULL, 0, &via82c505_ops, sysdata, &sysdata->resources); return NULL; }
gpl-2.0
santod/KK_sense_kernel_htc_m7vzw
arch/s390/mm/pageattr.c
4807
1212
/* * Copyright IBM Corp. 2011 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> static void change_page_attr(unsigned long addr, int numpages, pte_t (*set) (pte_t)) { pte_t *ptep, pte; pmd_t *pmdp; pud_t *pudp; pgd_t *pgdp; int i; for (i = 0; i < numpages; i++) { pgdp = pgd_offset(&init_mm, addr); pudp = pud_offset(pgdp, addr); pmdp = pmd_offset(pudp, addr); if (pmd_huge(*pmdp)) { WARN_ON_ONCE(1); continue; } ptep = pte_offset_kernel(pmdp, addr); pte = *ptep; pte = set(pte); __ptep_ipte(addr, ptep); *ptep = pte; addr += PAGE_SIZE; } } int set_memory_ro(unsigned long addr, int numpages) { change_page_attr(addr, numpages, pte_wrprotect); return 0; } EXPORT_SYMBOL_GPL(set_memory_ro); int set_memory_rw(unsigned long addr, int numpages) { change_page_attr(addr, numpages, pte_mkwrite); return 0; } EXPORT_SYMBOL_GPL(set_memory_rw); /* not possible */ int set_memory_nx(unsigned long addr, int numpages) { return 0; } EXPORT_SYMBOL_GPL(set_memory_nx); int set_memory_x(unsigned long addr, int numpages) { return 0; }
gpl-2.0
lemon-pi/linux-3.x
arch/sh/kernel/time.c
7367
2636
/* * arch/sh/kernel/time.c * * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/profile.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/clockchips.h> #include <linux/platform_device.h> #include <linux/smp.h> #include <linux/rtc.h> #include <asm/clock.h> #include <asm/rtc.h> /* Dummy RTC ops */ static void null_rtc_get_time(struct timespec *tv) { tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0); tv->tv_nsec = 0; } static int null_rtc_set_time(const time_t secs) { return 0; } void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; void read_persistent_clock(struct timespec *ts) { rtc_sh_get_time(ts); } #ifdef CONFIG_GENERIC_CMOS_UPDATE int update_persistent_clock(struct timespec now) { return rtc_sh_set_time(now.tv_sec); } #endif unsigned int get_rtc_time(struct rtc_time *tm) { if (rtc_sh_get_time != null_rtc_get_time) { struct timespec tv; rtc_sh_get_time(&tv); rtc_time_to_tm(tv.tv_sec, tm); } return RTC_24H; } EXPORT_SYMBOL(get_rtc_time); int set_rtc_time(struct rtc_time *tm) { unsigned long secs; rtc_tm_to_time(tm, &secs); return rtc_sh_set_time(secs); } EXPORT_SYMBOL(set_rtc_time); static int __init rtc_generic_init(void) { struct platform_device *pdev; if (rtc_sh_get_time == null_rtc_get_time) return -ENODEV; pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } module_init(rtc_generic_init); void (*board_time_init)(void); static void __init sh_late_time_init(void) { /* * Make sure all compiled-in early timers 
register themselves. * * Run probe() for two "earlytimer" devices, these will be the * clockevents and clocksource devices respectively. In the event * that only a clockevents device is available, we -ENODEV on the * clocksource and the jiffies clocksource is used transparently * instead. No error handling is necessary here. */ early_platform_driver_register_all("earlytimer"); early_platform_driver_probe("earlytimer", 2, 0); } void __init time_init(void) { if (board_time_init) board_time_init(); clk_init(); late_time_init = sh_late_time_init; }
gpl-2.0